diff --git "a/2025/A Geometric Framework for Understanding Memorization in Generative Models/layout.json" "b/2025/A Geometric Framework for Understanding Memorization in Generative Models/layout.json" new file mode 100644--- /dev/null +++ "b/2025/A Geometric Framework for Understanding Memorization in Generative Models/layout.json" @@ -0,0 +1,33926 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 116 + ], + "type": "text", + "content": "A GEOMETRIC FRAMEWORK FOR UNDERSTANDING MEMORIZATION IN GENERATIVE MODELS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 135, + 434, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 135, + 434, + 158 + ], + "spans": [ + { + "bbox": [ + 110, + 135, + 434, + 158 + ], + "type": "text", + "content": "Brendan Leigh Ross, Hamidreza Kamkari, Tongzi Wu, Rasa Hosseinzadeh, Zhaoyan Liu, George Stein, Jesse C. 
Cresswell, Gabriel Loaiza-Ganem" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 158, + 158, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 158, + 158, + 168 + ], + "spans": [ + { + "bbox": [ + 112, + 158, + 158, + 168 + ], + "type": "text", + "content": "Layer 6 AI" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 169, + 424, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 169, + 424, + 180 + ], + "spans": [ + { + "bbox": [ + 112, + 169, + 424, + 180 + ], + "type": "text", + "content": "{brendan,hamid,tongzi,rasa,zhaoyan,george,jesse,gabriel}@layer6.ai" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "spans": [ + { + "bbox": [ + 276, + 209, + 334, + 220 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "spans": [ + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "text", + "content": "As deep generative models have progressed, recent work has shown them to be capable of memorizing and reproducing training datapoints when deployed. These findings call into question the usability of generative models, especially in light of the legal and privacy risks brought about by memorization. To better understand this phenomenon, we propose the manifold memorization hypothesis (MMH), a geometric framework which leverages the manifold hypothesis into a clear language in which to reason about memorization. 
We propose to analyze memorization in terms of the relationship between the dimensionalities of " + }, + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "text", + "content": " the ground truth data manifold and " + }, + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 140, + 234, + 470, + 411 + ], + "type": "text", + "content": " the manifold learned by the model. This framework provides a formal standard for \"how memorized\" a datapoint is and systematically categorizes memorized data into two types: memorization driven by overfitting and memorization driven by the underlying data distribution. By analyzing prior work in the context of the MMH, we explain and unify assorted observations in the literature. We empirically validate the MMH using synthetic data and image datasets up to the scale of Stable Diffusion, developing new tools for detecting and preventing generation of memorized samples in the process." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 434, + 206, + 446 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 206, + 446 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 206, + 446 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": "Suppose " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": " is a dataset in " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": " drawn independently from a ground truth probability distribution " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": ". A deep generative model (DGM) is a probability distribution " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "p_\\theta(x)" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": " designed to capture " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": " only from knowledge of " + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 456, + 506, + 556 + ], + "type": "text", + "content": ". 
DGMs, and most famously, diffusion models (DMs; Sohl-Dickstein et al., 2015; Ho et al., 2020), have led the \"generative AI\" boom with their ability to generate realistic images from text prompts (Karras et al., 2019; Rombach et al., 2022). DMs are thus likely to be deployed in an increasing number of public-facing or safety-critical applications. However, with sufficient model capacity, DGMs are known to memorize some of their training data. Memorization occurs at various degrees of specificity, including identities of brands, layouts of specific scenes, or exact copies of images (Webster et al., 2021; Somepalli et al., 2023a; Carlini et al., 2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "text", + "content": "Memorization is undesirable for myriad reasons. Simply put, the more a model reproduces its training data, the less useful it becomes. Memorization is a modelling failure under the DGM definition provided above; if the underlying ground truth " + }, + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "text", + "content": " does not place positive probability mass on individual datapoints, then a " + }, + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 561, + 506, + 661 + ], + "type": "text", + "content": " that memorizes any datapoint must be failing to generalize (Yoon et al., 2023). But memorization's risks go beyond mere utility. Training datasets may contain private information which, if memorized, might be exposed in downstream applications. 
Copyright law includes \"substantial similarity\" between generated and training data as a criterion in its definition of infringement, meaning that reproduced training samples can open up model builders or users to legal liability. For instance, the recent legal decision by Orrick (2023) hinged on this criterion." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "The increasing dependence of society on generative models and resulting risks call for work to better understand memorization. Recent empirical work has identified mechanistic causes of memorization including but not limited to data complexity, duplication of training points, and highly specific labels (Somepalli et al., 2023b; Gu et al., 2023). We group these insights under the umbrella of \"memorization phenomena\", a catch-all term for the various interesting memorization-related observations we would like to understand better. 
Though useful in practice, these memorization" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 129, + 81, + 245, + 168 + ], + "blocks": [ + { + "bbox": [ + 129, + 81, + 245, + 168 + ], + "lines": [ + { + "bbox": [ + 129, + 81, + 245, + 168 + ], + "spans": [ + { + "bbox": [ + 129, + 81, + 245, + 168 + ], + "type": "image", + "image_path": "b361b8a7dc2d0481e66a36c90dc0fbbec8954cc58af02188cf3f37b288edff13.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 248, + 81, + 364, + 169 + ], + "blocks": [ + { + "bbox": [ + 248, + 81, + 364, + 169 + ], + "lines": [ + { + "bbox": [ + 248, + 81, + 364, + 169 + ], + "spans": [ + { + "bbox": [ + 248, + 81, + 364, + 169 + ], + "type": "image", + "image_path": "34b49cb8fe8f150ff46b2cbb381b1f71c0d341a6a96dbdaa168fd7b45d02b7f6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 257, + 173, + 353, + 183 + ], + "lines": [ + { + "bbox": [ + 257, + 173, + 353, + 183 + ], + "spans": [ + { + "bbox": [ + 257, + 173, + 353, + 183 + ], + "type": "text", + "content": "(b) OD-Mem with " + }, + { + "bbox": [ + 257, + 173, + 353, + 183 + ], + "type": "inline_equation", + "content": 
"\\mathrm{LID}_{\\theta}(x) = 1" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 366, + 81, + 484, + 168 + ], + "blocks": [ + { + "bbox": [ + 366, + 81, + 484, + 168 + ], + "lines": [ + { + "bbox": [ + 366, + 81, + 484, + 168 + ], + "spans": [ + { + "bbox": [ + 366, + 81, + 484, + 168 + ], + "type": "image", + "image_path": "34a7d86923679c8e1649f5ea3b98fe4b6347abefc210f1e6a725ce630e550dd1.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 129, + 184, + 244, + 270 + ], + "blocks": [ + { + "bbox": [ + 138, + 173, + 235, + 183 + ], + "lines": [ + { + "bbox": [ + 138, + 173, + 235, + 183 + ], + "spans": [ + { + "bbox": [ + 138, + 173, + 235, + 183 + ], + "type": "text", + "content": "(a) OD-Mem with " + }, + { + "bbox": [ + 138, + 173, + 235, + 183 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 0" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 129, + 184, + 244, + 270 + ], + "lines": [ + { + "bbox": [ + 129, + 184, + 244, + 270 + ], + "spans": [ + { + "bbox": [ + 129, + 184, + 244, + 270 + ], + "type": "image", + "image_path": "1832bb16c51bd1e88be974f9f64e1c17dc5b00f159c01d38e0e57ce25e5881d5.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 275, + 233, + 285 + ], + "lines": [ + { + "bbox": [ + 137, + 275, + 233, + 285 + ], + "spans": [ + { + "bbox": [ + 137, + 275, + 233, + 285 + ], + "type": "text", + "content": "(d) DD-Mem with " + }, + { + "bbox": [ + 137, + 275, + 233, + 285 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 0" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 247, + 185, + 362, + 271 + ], + "blocks": [ + { + "bbox": [ + 365, + 173, + 483, + 
183 + ], + "lines": [ + { + "bbox": [ + 365, + 173, + 483, + 183 + ], + "spans": [ + { + "bbox": [ + 365, + 173, + 483, + 183 + ], + "type": "text", + "content": "(c) No memorization with " + }, + { + "bbox": [ + 365, + 173, + 483, + 183 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 2" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 247, + 185, + 362, + 271 + ], + "lines": [ + { + "bbox": [ + 247, + 185, + 362, + 271 + ], + "spans": [ + { + "bbox": [ + 247, + 185, + 362, + 271 + ], + "type": "image", + "image_path": "e9208b0a5e62d3bfad4df6d5322f4a05b0d177f2257c2abb0167a253b95dcce3.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 256, + 275, + 351, + 285 + ], + "lines": [ + { + "bbox": [ + 256, + 275, + 351, + 285 + ], + "spans": [ + { + "bbox": [ + 256, + 275, + 351, + 285 + ], + "type": "text", + "content": "(e) DD-Mem with " + }, + { + "bbox": [ + 256, + 275, + 351, + 285 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 1" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 367, + 185, + 484, + 271 + ], + "blocks": [ + { + "bbox": [ + 367, + 185, + 484, + 271 + ], + "lines": [ + { + "bbox": [ + 367, + 185, + 484, + 271 + ], + "spans": [ + { + "bbox": [ + 367, + 185, + 484, + 271 + ], + "type": "image", + "image_path": "650e0e3bc94b18ad1a07066ce3cac423fb62eafb5a810c7f3e99198ce38a6e86.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 369, + 275, + 482, + 285 + ], + "lines": [ + { + "bbox": [ + 369, + 275, + 482, + 285 + ], + "spans": [ + { + "bbox": [ + 369, + 275, + 482, + 285 + ], + "type": "text", + "content": "(f) Poorly fit model, LIDs are irrelevant" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": 
[ + 106, + 416, + 156, + 467 + ], + "blocks": [ + { + "bbox": [ + 106, + 416, + 156, + 467 + ], + "lines": [ + { + "bbox": [ + 106, + 416, + 156, + 467 + ], + "spans": [ + { + "bbox": [ + 106, + 416, + 156, + 467 + ], + "type": "image", + "image_path": "7233c22948935981f21c5e1f572077a4bf7cf429239d73d41e7e9e46397cde0b.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 474, + 504, + 505 + ], + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 505 + ], + "type": "text", + "content": "Figure 2: 8 images along a relatively low-dimensional manifold learned by Stable Diffusion v1.5. The first is a real image from LAION (flagged as memorized by Webster (2023)), and the remainder were generated by the model." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 156, + 416, + 205, + 467 + ], + "blocks": [ + { + "bbox": [ + 156, + 416, + 205, + 467 + ], + "lines": [ + { + "bbox": [ + 156, + 416, + 205, + 467 + ], + "spans": [ + { + "bbox": [ + 156, + 416, + 205, + 467 + ], + "type": "image", + "image_path": "8d2e3c34c3dd74aa7d5a2d4bc90014a7b3b164da3a8847f7118474eed2a42965.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 206, + 416, + 255, + 467 + ], + "blocks": [ + { + "bbox": [ + 206, + 416, + 255, + 467 + ], + "lines": [ + { + "bbox": [ + 206, + 416, + 255, + 467 + ], + "spans": [ + { + "bbox": [ + 206, + 416, + 255, + 467 + ], + "type": "image", + "image_path": "7ddcb728453dca1ca0214354fd126fe6ede89e81816354420f2f54b268cc6529.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 255, + 416, + 305, + 467 + ], + "blocks": [ + { + "bbox": [ + 255, + 416, + 305, + 467 + ], + "lines": [ + { + "bbox": [ + 255, + 
416, + 305, + 467 + ], + "spans": [ + { + "bbox": [ + 255, + 416, + 305, + 467 + ], + "type": "image", + "image_path": "cadb84032476ba747a129e349f2c8124fd103aa9e1bc2a53cf4d53dba9b180e5.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 306, + 416, + 354, + 467 + ], + "blocks": [ + { + "bbox": [ + 306, + 416, + 354, + 467 + ], + "lines": [ + { + "bbox": [ + 306, + 416, + 354, + 467 + ], + "spans": [ + { + "bbox": [ + 306, + 416, + 354, + 467 + ], + "type": "image", + "image_path": "d722b7d1a57ad0e15f36242dda2b242da13ce444e41ac20dcde5589256a7f9ac.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 355, + 416, + 404, + 467 + ], + "blocks": [ + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "text", + "content": "Figure 1: An illustrative example of LID values for models with different quality of fit and degrees of memorization. In these plots, the ground truth manifold " + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "text", + "content": " is depicted in light blue, training samples " + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "inline_equation", + "content": "\\{x_{i}\\}_{i = 1}^{n}\\subset \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "text", + "content": " are depicted as crosses, and the model manifolds " + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "text", + "content": " are depicted in red. 
In (a) and (d), the model assigns 0-dimensional point masses around the three leftmost datapoints, indicating that it will reproduce them directly at test time; however in the former case this is caused by overfitting " + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "inline_equation", + "content": "(\\mathrm{LID}_{\\theta}(x) < \\mathrm{LID}_{*}(x))" + }, + { + "bbox": [ + 104, + 293, + 506, + 405 + ], + "type": "text", + "content": ", while in the latter case it is caused by the ground truth data having small LID. The models in (b) and (e) are analogous to (a) and (b), respectively, and still memorize, but with an extra degree of freedom in the form of a 1-dimensional submanifold containing the three points. Only the model in (c), which has learned a 2-dimensional manifold through its full support, has generalized well enough and has learned a manifold of high enough dimension to avoid both types of memorization. Finally, (f) shows a poorly fit model where LID and memorization are not meaningfully related." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 355, + 416, + 404, + 467 + ], + "lines": [ + { + "bbox": [ + 355, + 416, + 404, + 467 + ], + "spans": [ + { + "bbox": [ + 355, + 416, + 404, + 467 + ], + "type": "image", + "image_path": "eac214e57f973beabf81c600114efa65e45469c48aed46a1b6236740286efaac.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 405, + 416, + 454, + 467 + ], + "blocks": [ + { + "bbox": [ + 405, + 416, + 454, + 467 + ], + "lines": [ + { + "bbox": [ + 405, + 416, + 454, + 467 + ], + "spans": [ + { + "bbox": [ + 405, + 416, + 454, + 467 + ], + "type": "image", + "image_path": "b6f9f5a614d1eaf3afc612d59553c9f1ccda64137dde37b2f36f4458a1d732f5.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 454, + 416, + 504, + 467 + ], + "blocks": [ + { + "bbox": [ + 454, + 416, + 504, + 467 + ], + "lines": [ + { + "bbox": [ + 454, + 416, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 454, + 416, + 504, + 467 + ], + "type": "image", + "image_path": "b8e574f13f4266f8a4dd8e660af41740a7bc10602a6215804ee3b8459ddf1feb.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 528, + 506, + 606 + ], + "type": "text", + "content": "phenomena have yet to be unified and interpreted under a single theoretical framework. Meanwhile, formal treatments of memorization have led to isolated usecases such as detection (Meehan et al., 2020; Bhattacharjee et al., 2023) and prevention on a model level (Vyas et al., 2023), but have provided little explanatory power for memorization phenomena. 
In addition to providing theoretical insights, a unifying framework could yield more capabilities such as identifying whether a training image has been memorized, altering the sampling process to reduce memorization, and detecting memorized generations post hoc." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "In this work, we introduce the manifold memorization hypothesis (MMH), a geometric framework to explain memorization. In short, we propose that memorization occurs at a point " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": " when the manifold learned by the generative model contains " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": " but has too small a dimensionality at " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": ". As we will see, this understudied perspective is a natural take on memorization that leads to practical insights and effectively explains memorization phenomena like those mentioned above. 
Although we mainly focus on DMs, the most notorious memorizers, our geometric framework applies to any DGM on a continuous data space " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "; indeed, we empirically validate it on generative adversarial networks (GANs; Goodfellow et al., 2014; Karras et al., 2019) as well. Pidstrigach (2022) was the first to show that DMs are capable of learning low-dimensional structure in " + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": " and that this manifold learning capability is a driver of memorization; in this sense, our work extends this connection into a general framework, grounds it in empirical findings, and connects it to recent work on memorization." + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 367, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 367, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 367, + 95 + ], + "type": "text", + "content": "This paper is organized according to 
the following contributions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 128, + 102, + 506, + 258 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 129, + 102, + 504, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 102, + 504, + 125 + ], + "spans": [ + { + "bbox": [ + 129, + 102, + 504, + 125 + ], + "type": "text", + "content": "1. We advance the MMH in Section 2. After defining the key notions of the data manifold and local intrinsic dimension (LID), we describe how LIDs correspond to memorization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 128, + 128, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 128, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 128, + 128, + 506, + 172 + ], + "type": "text", + "content": "2. We demonstrate the explanatory power of the MMH in Section 3 by grounding it in prior observations about the behaviour of models that memorize. As this section will show, memorization phenomena observed in past work can be predicted and explained by the MMH." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 128, + 175, + 504, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 175, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 128, + 175, + 504, + 220 + ], + "type": "text", + "content": "3. In Subsection 4.1, we empirically test the MMH, showing that it both accurately describes reality and is useful in practice. As predicted by the MMH, estimates of LID are strongly predictive of memorization at scales ranging from 2-dimensional synthetic data to Stable Diffusion (Rombach et al., 2022)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 128, + 223, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 223, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 128, + 223, + 504, + 258 + ], + "type": "text", + "content": "4. 
Finally, inspired by the MMH, in Subsection 4.2 we devise scalable approaches to avert memorization during sampling from Stable Diffusion and to identify tokens in the text conditioning that contribute to memorization." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 271, + 382, + 284 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 271, + 382, + 284 + ], + "spans": [ + { + "bbox": [ + 105, + 271, + 382, + 284 + ], + "type": "text", + "content": "2 UNDERSTANDING MEMORIZATION THROUGH LID" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": "Preliminaries Here we presume the manifold hypothesis: that data of interest lies on a manifold " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " (Bengio et al., 2013). We take a generalized definition of manifold in which " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " is allowed to have different dimensionalities in different regions, which is appropriate for realistic, heterogeneous data with varying degrees of structure and complexity. 
In particular, we assume that both our ground truth distribution " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " and our model " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "p_\\theta(x)" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " produce samples on manifolds, which we refer to as " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_\\theta" + }, + { + "bbox": [ + 104, + 292, + 506, + 381 + ], + "type": "text", + "content": " respectively. We direct readers to Loaiza-Ganem et al. (2024) for a justification and formal mathematical treatment of both of these assumptions, which are especially valid when the data is high-dimensional and the models are high-performing ones such as DMs and GANs." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": "Our framework for understanding memorization revolves around the notion of a point's local intrinsic dimension (LID). 
Given a manifold " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " and a point " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "x\\in \\mathcal{M}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ", we define the LID of " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}(x)" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ", with respect to " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " as the dimensionality of " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ". 
In this work, we will mainly consider the LIDs of points " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "x\\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " with respect to two specific manifolds: " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ". We will refer to these quantities as " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x)" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 385, + 506, + 443 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": "Intuition and the Manifold Hypothesis Before discussing our framework, we review some intuition relating the manifold hypothesis to practical datasets. Manifold structure " + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{M} \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": " arises from sets of constraints. 
These can range from very simple, like a set of linear constraints (" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{M} = \\{x \\mid Ax = b\\}" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": "), to highly complex (" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\mathcal{M} = \\{x \\mid x \\text{ is an image of a face}\\}" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": "). Locally at a point " + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": ", each constraint determines a direction one cannot move without leaving the manifold and violating the structure of the dataset. Hence, a region governed by " + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\ell" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": " independent and active constraints will have dimensionality " + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}(x) = d - \\ell" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": ". The value of " + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}(x)" + }, + { + "bbox": [ + 104, + 446, + 506, + 622 + ], + "type": "text", + "content": " can be intuited as the number of degrees of freedom - valid independent directions of movement in which the characteristics of the dataset are preserved. Another connection is to complexity. For example, estimates of LID from algorithms like FLIPD (Kamkari et al., 2024b) or the normal bundle (NB) method of Stanczuk et al. 
(2024) (which we use in our experiments; see Appendix B for details) have been shown to correspond closely with the complexity of an image; it is reasonable to expect that images with more complex features can endure more changes (such as morphing, moving, or changing the colours of different parts of the image) without losing coherence. The notions of constraints, degrees of freedom, and complexity along with their relationship to LID will help us understand its connection to memorization in later sections." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": "A Geometric Framework for Understanding Memorization In this section we formulate a framework for understanding memorization based on comparisons between " + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x)" + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": ". 
As a motivating example, consider Figure 1, which depicts six possible models " + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": " trained on datasets " + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i=1}^n" + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": " that each lie on a ground truth manifold " + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 628, + 507, + 673 + ], + "type": "text", + "content": ". In the first scenario, Figure 1a, the" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 679, + 504, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 679, + 504, + 711 + ], + "spans": [ + { + "bbox": [ + 104, + 679, + 504, + 711 + ], + "type": "text", + "content": "1Most authors define a manifold to have a constant dimension over the entire set. Under this common definition, our assumption is referred to as the union of manifolds hypothesis (Brown et al., 2023). We use a more general definition of manifold for brevity." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 732 + ], + "type": "text", + "content": "This statement is captured formally by the regular level set theorem of differential geometry, and manifolds can be modelled as such (Lee, 2012; Ross et al., 2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "model " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " has precisely memorized some of the training data. This is a well-understood mode of memorization; training datapoints are exactly reproduced. To achieve this, the model has learned a 0-dimensional manifold around these datapoints. To our knowledge, Pidstrigach (2022) was the first to point out that a model capable of learning 0-dimensional manifolds can memorize the training data. 
From this example, we infer that " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " can be perfectly reproduced when " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 0" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ". This indicates suboptimality in the model at the datapoints shown, for which " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x) = 2" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": "However, memorization can be more complex than simply reproducing a datapoint. For example, Somepalli et al. (2023a) identify instances where layouts, styles, or foreground or background objects in training images are copied without copying the entire image, a phenomenon they refer to as reconstructive memory. Webster (2023) surfaces more instances of the same phenomenon and refers to them as template verbatim. See Figure 2 for an example. In the region of these points " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": ", the model is able to generate images with degrees of freedom in some attributes (e.g., colour or texture), but is too constrained in other attributes (e.g., layout, style, or content). 
Geometrically, " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": " is too constrained compared to the idealized ground truth manifold " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": "; i.e., " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) < \\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": ". We depict this situation in Figure 1b, wherein the model has erroneously assigned " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = 1" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": " for some of the training datapoints." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "text", + "content": "Two Types of Memorization We expect two types of memorization to be of interest. An academic interested in designing DGMs that learn the ground truth distribution correctly will chiefly be interested in avoiding the memorization scenario " + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) < \\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "text", + "content": ". We refer to this first scenario as overfitting-driven memorization (OD-Mem). 
This situation represents a modelling failure in that " + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "text", + "content": " is not generalizing correctly to " + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 270, + 504, + 326 + ], + "type": "text", + "content": ", and is illustrated in Figure 1a and Figure 1b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": "However, an industry practitioner deploying a consumer-facing model might be more interested in hypothetical values of " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": " per se, irrespective of the values of " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": ". 
For any points " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": " containing trademarked or private information, low values of " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": " will be of concern even if " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) = \\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": ", as this information is likely to be revealed in samples generated from this region. A practitioner would rightly refer to this situation as memorization despite the model generalizing correctly. We refer to this second scenario as data-driven memorization (DD-Mem), and illustrate it in Figure 1d and Figure 1e. This certainly happens in practice; for example, conditioning on the title of a specific artwork (e.g. \"The Great Wave off Kanagawa\" by Katsushika Hokusai (Somepalli et al., 2023a)) is a very strong constraint, leaving few degrees of freedom in the ground truth manifold " + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 331, + 506, + 474 + ], + "type": "text", + "content": ", but reproducing specific artworks may be undesirable in a production model. Unlike OD-Mem, DD-Mem is not overfitting in the classical sense, and a notable consequence is that it cannot be detected by comparing training and test likelihoods. We refer to the conceptualization of how LIDs relate to memorization through OD-Mem and DD-Mem as the manifold memorization hypothesis." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": "No memorization is present in Figure 1c, in which the model manifold " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": " matches the ground truth manifold " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": " and both have sufficiently high LID. We highlight that the MMH assumes high-performing models whose manifold " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": " is roughly aligned with the data manifold " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": "; when this is not the case, as in Figure 1f, " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": " and its relationship to " + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 479, + 506, + 525 + ], + "type": "text", + "content": " become irrelevant to memorization." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 539, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 506, + 639 + ], + "type": "text", + "content": "Why is the MMH Useful? The MMH is a hypothesis about how memorization occurs in practice for high-dimensional data. Its utility is best framed in contrast to past treatments of memorization. First, while past theoretical frameworks for memorization have focused on probability mass, our geometric perspective leads to more practical tools. For example, Bhattacharjee et al. (2023) propose a purely probabilistic definition of memorization that can be detected only with access to the training dataset and the ability to generate large numbers of samples, which are intractable requirements at the scale of LAION-2B (Schuhmann et al., 2022) and Stable Diffusion. In contrast, the MMH suggests that memorization can be detected through " + }, + { + "bbox": [ + 104, + 539, + 506, + 639 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 539, + 506, + 639 + ], + "type": "text", + "content": ", for which tractable estimators exist at scale. We explore these estimators in Section 4." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 733 + ], + "type": "text", + "content": "Second, the MMH explains and quantifies the phenomenon depicted in Figure 2: reconstructive memorization. While it has been studied in the past (Somepalli et al., 2023a; Webster, 2023; Wen et al., 2023), it has been resistant to theoretical explanation in part because past work has defined memorization based on distance to the memorized training point (see Appendix A for more discussion on definitions). 
It is clear from Figure 2 that distance cannot capture reconstructive memorization; the training datapoint on the left is far in pixel space from the Stable Diffusion-generated samples to its right. Our framework overcomes this challenge by interpreting memorization in relation to the model and data manifolds without reference to distances or any specific training datapoint." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": "Third, the MMH distinguishes between OD-Mem and DD-Mem, while past analyses have not. Bhattacharjee et al. 
(2023) would allow for OD-Mem but not DD-Mem under their definition of memorization, while empirical work tends to ignore the effect of " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 82, + 506, + 149 + ], + "type": "text", + "content": ", thus subsuming both OD-Mem and DD-Mem in spirit if not formally (Carlini et al., 2023; Yoon et al., 2023; Gu et al., 2023). For further details, please see Appendix A, where we formally develop the relationship between the MMH and definitions of memorization in related work." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": "Defining and distinguishing between OD-Mem and DD-Mem suggests immediate solutions to each. OD-Mem is overfitting and can be addressed accordingly, such as by collecting more data or improving a model's inductive biases. On the other hand, DD-Mem indicates that the training distribution " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": " does not actually match the desired distribution at inference time, and hence is a misalignment of data and objectives. It can be addressed by changing " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": " itself, such as by altering the data collection, cleaning, and augmentation procedures. 
We explore this point further in Section 3. Unlike OD-Mem, DD-Mem cannot be addressed by improving the model to generalize better. Both OD-Mem and DD-Mem can also in principle be addressed by augmenting " + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 154, + 506, + 266 + ], + "type": "text", + "content": " to generate higher-LID samples. In Section 4, we propose solutions to alter the data-generating process with precisely this goal." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 277, + 348, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 348, + 290 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 348, + 290 + ], + "type": "text", + "content": "3 EXPLAINING MEMORIZATION PHENOMENA" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 298, + 506, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 506, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 506, + 387 + ], + "type": "text", + "content": "In this section, we demonstrate the explanatory power of the MMH by showing how it explains memorization phenomena in related work. In the process, this section demonstrates two advantages of our geometric framework. First, it provides a unifying perspective on seemingly disparate observations throughout the literature (nevertheless, this is not meant as a related work section — for that see Section 5). Second, the MMH links memorization to the rich theoretical toolboxes of measure theory and geometry, which we use in this section to establish formal connections to past work. Propositions, theorems, and proofs in this section are presented informally for clarity. For full theorem statements and proofs, please see Appendix E." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "text", + "content": "Duplicated Data and LID It has been broadly observed that memorization occurs when training points are duplicated (Nichol et al., 2022; Carlini et al., 2022; Somepalli et al., 2023a). In Proposition 3.1, we show that duplicated datapoints lead to DD-Mem; duplicated points " + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "text", + "content": " indicate " + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) = 0" + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "text", + "content": ", so even a correctly fitted model will have " + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x_0) = 0" + }, + { + "bbox": [ + 104, + 392, + 506, + 437 + ], + "type": "text", + "content": " (as in Figure 1d)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "text", + "content": "Proposition 3.1 (Informal). Let " + }, + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "inline_equation", + "content": "\\{x_{i}\\}_{i = 1}^{n}" + }, + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "text", + "content": " be a training dataset drawn independently from " + }, + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 440, + 506, + 464 + ], + "type": "text", + "content": ". 
Under some regularity conditions, the following hold:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 472, + 504, + 525 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "spans": [ + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "text", + "content": "1. If duplicates occur in " + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "inline_equation", + "content": "\\{x_{i}\\}_{i = 1}^{n}" + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "text", + "content": " with positive probability, then they occur at a point " + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "inline_equation", + "content": "LID_{*}(x_{0}) = 0" + }, + { + "bbox": [ + 129, + 472, + 504, + 495 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "text", + "content": "2. 
If " + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "inline_equation", + "content": "LID_{*}(x_{0}) = 0" + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "text", + "content": " is sufficiently large, then duplication will occur in " + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i=1}^n" + }, + { + "bbox": [ + 127, + 502, + 504, + 525 + ], + "type": "text", + "content": " with near-certainty." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "text", + "content": "Proof. See Appendix E for the formal statement of the theorem and proof. To understand both conditions, it suffices to note first that duplicate samples are intuitively equivalent to " + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "text", + "content": " assigning positive probability to a point. Under mild regularity conditions on the nature of the " + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 533, + 506, + 579 + ], + "type": "text", + "content": ", positive probability at a point is equivalent to a 0-dimensional manifold at that point." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": "From this result, we gather that improving model generalization is not the solution to duplication. Instead, one may need to add inductive biases that prevent " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": " from learning 0-dimensional points. Of course, the more straightforward path is to change the data distribution " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": " by de-duplicating the training dataset. We carry the same intuition forward to \"near-duplicated content\", where similar but non-identical points occur together in the dataset, in which case " + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 582, + 506, + 650 + ], + "type": "text", + "content": " would be low but nonzero in the region of the near-duplicated content (as in Figure 1e)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 654, + 504, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 504, + 688 + ], + "type": "text", + "content": "Conditioning and LID Somepalli et al. (2023b) and Yoon et al. 
(2023) observe that conditioning on highly specific prompts " + }, + { + "bbox": [ + 104, + 654, + 504, + 688 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 654, + 504, + 688 + ], + "type": "text", + "content": " encourages the generation of memorized samples. Here, we point out that conditioning decreases LID, making models more likely to generate memorized samples." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": "Proposition 3.2 (Informal). Let " + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": ", and let us denote by " + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "inline_equation", + "content": "LID_{*}(x_0 \\mid c)" + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": " the " + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "inline_equation", + "content": "LID" + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": " with respect to the support of the conditional distribution " + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "inline_equation", + "content": "p_{*}(x \\mid c)" + }, + { + "bbox": [ + 104, + 691, + 504, + 715 + ], + "type": "text", + "content": ". 
We then have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 249, + 719, + 505, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 719, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 249, + 719, + 505, + 734 + ], + "type": "interline_equation", + "content": "L I D _ {*} \\left(x _ {0} \\mid c\\right) \\leq L I D _ {*} \\left(x _ {0}\\right). \\tag {1}", + "image_path": "d4e9a9241ed92b5c5d3a7a6228fc712ca24922f3b9ef04cba33f2ecc8da2bb6b.jpg" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Proof. See Appendix E for the formal statement of the theorem and proof. Intuitively, conditioning can be interpreted as adding additional constraints to " + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": ", which cannot increase its dimension. 
" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\square" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "text", + "content": "Conditioning on highly specific " + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "text", + "content": " can be linked to both DD-Mem and OD-Mem. Introducing strong constraints greatly decreases " + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "text", + "content": ", leading to DD-Mem. However, if a relatively low number of training examples satisfy " + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 116, + 504, + 150 + ], + "type": "text", + "content": ", the model could overfit, leading to OD-Mem as well." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "text", + "content": "Complexity and LID For images, Somepalli et al. (2023b) also highlight low complexity as a factor causing memorization. 
Using the understanding that LID corresponds to complexity as discussed in Section 2, we infer that low-complexity datapoints " + }, + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "text", + "content": " have low " + }, + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x)" + }, + { + "bbox": [ + 104, + 155, + 504, + 201 + ], + "type": "text", + "content": ". This fact suggests that, like with duplication, memorization of low-complexity datapoints is an example of DD-Mem." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "text", + "content": "The Classifier-Free Guidance Norm and LID Classifier-free guidance (CFG) is a way to improve the quality of conditional generation in DMs. 
Whereas standard conditional generation employs the score function " + }, + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "inline_equation", + "content": "s_{\\theta}(x; t, c)" + }, + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "text", + "content": ", which refers to a neural estimate at time " + }, + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 105, + 205, + 336, + 282 + ], + "type": "text", + "content": " of the conditional score, CFG increases the strength of conditioning by using the following modified score:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 289, + 336, + 314 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 289, + 336, + 314 + ], + "spans": [ + { + "bbox": [ + 111, + 289, + 336, + 314 + ], + "type": "interline_equation", + "content": "\\underbrace {s _ {\\theta} ^ {\\mathrm {C F G}} (x ; t , c)} _ {\\text {C F G - a d j u s t e d s c o r e}} = s _ {\\theta} (x; t, \\emptyset) + \\lambda (\\underbrace {s _ {\\theta} (x ; t , c) - s _ {\\theta} (x ; t , \\emptyset)} _ {\\text {C F G v e c t o r}}), \\tag {2}", + "image_path": "7ffb82f2796ea63ed1a5d46ec4b1dfabdfee0405c7f60f3cc55c243036ad2fa0.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "spans": [ + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "text", + "content": " is a hyperparameter for \"guidance strength\" and " + }, + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "inline_equation", + "content": "s_{\\theta}(x; t, \\emptyset)" + }, + { + "bbox": [ + 104, + 321, + 336, + 366 + ], + "type": "text", + 
"content": " refers to conditioning on the empty string (here we formulate DMs using stochastic differential equations (Song et al., 2021))." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 344, + 205, + 493, + 332 + ], + "blocks": [ + { + "bbox": [ + 344, + 205, + 493, + 332 + ], + "lines": [ + { + "bbox": [ + 344, + 205, + 493, + 332 + ], + "spans": [ + { + "bbox": [ + 344, + 205, + 493, + 332 + ], + "type": "image", + "image_path": "66d55de27be629a4d98f033ad17217e50168c1be75f08feb479d70af61f6fb33.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "type": "text", + "content": "Figure 3: CFG-adjusted scores vs CFG vectors for Stable Diffusion with " + }, + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "\\lambda = 7.5" + }, + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "type": "inline_equation", + "content": "t = 0.02" + }, + { + "bbox": [ + 342, + 335, + 504, + 361 + ], + "type": "text", + "content": " on 20 memorized and 20 non-memorized images from LAION." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": "Wen et al. (2023) identify that specific conditioning inputs " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": " lead to memorized samples when the CFG vector has a large magnitude. 
We explain this observation using the MMH as follows. First, we observe that a large CFG magnitude will generally result in a large magnitude of the CFG-adjusted score " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{\\mathrm{CFG}}(x; t, c)" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": ". We demonstrate this empirically in Figure 3. Furthermore, it is understood in the literature that a large " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\| s_{\\theta}^{\\mathrm{CFG}}(x; t, c) \\|" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": ", and its explosion as " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "t \\to 0" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": ", is common for high-dimensional data (Vahdat et al., 2021) and is necessary to generate samples from low-dimensional manifolds (Pidstrigach, 2022; Lu et al., 2023). It has been empirically observed that this explosion occurs faster as the dimensionality gap increases between the data manifold and the ambient data space (Loaiza-Ganem et al., 2024), which is one reason that generative modelling on lower-dimensional latent space tends to improve performance (Loaiza-Ganem et al., 2022). 
The largest " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\| s_{\\theta}^{\\mathrm{CFG}}(x; t, c) \\|" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": " values should thus generate points with the largest dimensionality difference from " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": "; i.e., points " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": " with the smallest " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x|c)" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": ". Hence we infer that reducing the CFG-adjusted score norm – or equivalently the CFG vector norm – should increase " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x|c)" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": " and lessen memorization, a fact confirmed empirically by Wen et al. (2023). Since this phenomenon corresponds to any " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": " with small " + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x|c)" + }, + { + "bbox": [ + 104, + 371, + 506, + 539 + ], + "type": "text", + "content": ", it can indicate both OD-Mem and DD-Mem under the MMH." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 556, + 200, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 556, + 200, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 200, + 567 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 578, + 379, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 379, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 379, + 588 + ], + "type": "text", + "content": "4.1 VERIFYING THE MANIFOLD MEMORIZATION HYPOTHESIS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": "In this section, we empirically verify the geometric framework which underpins the MMH. We analyze both " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " to study DD-Mem and OD-Mem. Several algorithms exist to estimate " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " for diffusion models, including the normal bundle (NB) method (Stanczuk et al., 2024), and more recently FLIPD (Kamkari et al., 2024b). 
For GANs, we approximate " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " of generated data by computing the rank of the Jacobian of the generator. Additionally, we use LPCA (Fukunaga & Olsen, 1971) to estimate " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " where applicable; see Appendix B for details on these methods and Appendix C for their hyperparameter configurations. In general " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " are unknown quantities that are approximated with the aforementioned estimators, throughout this section we write their respective estimates as " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{*}" + }, + { + "bbox": [ + 104, + 598, + 504, + 700 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Here we understand " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x|c)" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": " as the LID with respect to the support of the conditional distribution being sampled from by " + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{\\mathrm{CFG}}(x;t,c)" + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": "Diffusion Model on a von Mises Mixture In an illustrative experiment, we study a mixture of a von Mises distribution, which sits on a 1-dimensional circle, and a 0-dimensional point mass at the origin in 2-dimensional ambient space, as depicted in Figure 4; every point " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": " has either " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x) = 0" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": " or " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x) = 1" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": ". From this distribution we sample 100 training points, and by chance a single point " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": " sits isolated in a low-density region of the circle. Next, we train a DM on this data. In Figure 4 we depict 100 generated samples, colour-coded by their LID estimates, as estimated by FLIPD. 
Here, we see OD-Mem and DD-Mem in action: the model overfits at " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": ", producing near-exact copies, with " + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "inline_equation", + "content": "0 \\approx \\widehat{\\mathrm{LID}}_{\\theta}(x_0) < \\mathrm{LID}_*(x_0) = 1" + }, + { + "bbox": [ + 104, + 82, + 378, + 239 + ], + "type": "text", + "content": " (OD-Mem). The model also faithfully produces copies of the circle's center, but this is caused by low ground truth LIDs (DD-Mem), not modelling error." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "text", + "content": "CIFAR10 Memorization We analyze the higher-dimensional CIFAR10 dataset (Krizhevsky & Hinton, 2009) and use two pretrained generative models: iDDPM (Nichol & Dhariwal, 2021) and StyleGAN2-ADA (Karras et al., 2020). We generate 50,000 images from each model, and for each, we identify the most similar training image according to two distance metrics, " + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "text", + "content": " SSCD distance (Pizzi et al., 2022) and " + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "text", + "content": " calibrated " + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 243, + 378, + 320 + ], + "type": "text", + "content": " distance (Carlini et al., 2023). 
By" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 320, + 506, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 320, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 320, + 506, + 388 + ], + "type": "text", + "content": "thresholding on these metrics, we arrive at a small subset of potentially memorized examples, which we manually label as either exactly memorized, reconstructively memorized (Somepalli et al., 2023a), or not memorized. All other images are not labelled and have a low chance of being memorized. Further details and all images deemed memorized are reported in Appendix F. The first two panels in Figure 5a show our labels distinguish different types of memorization as we display the generated images vs. the closest SSCD match in the training dataset." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": "Next, we estimate " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " for each iDDPM and StyleGAN2-ADA sample. For iDDPM, we use the NB estimator. Figure 5b and Figure 5c show that " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " is generally smaller for memorized images compared to non-memorized ones. 
As shown in Figure 5d, " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{*}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " is considerably lower for exact memorization cases within the training dataset, suggesting that exact memorization for both models corresponds to DD-Mem. We also observe that in Figure 5b, reconstructively memorized samples exhibit lower values of " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " as compared to not memorized samples, despite the corresponding training data having comparable " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{*}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " (Figure 5d): the " + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 391, + 504, + 489 + ], + "type": "text", + "content": " estimates enable us to still classify these samples as memorized, showing a clear example of detecting OD-Mem." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "type": "text", + "content": "We have shown that LID estimates are effective at detecting both OD-Mem and DD-Mem, supporting the MMH hypothesis. 
However, while simpler images tend to be memorized more frequently, they are not always memorized (see Figure 5a, right panel), leading to some overlap in estimated " + }, + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 493, + 504, + 539 + ], + "type": "text", + "content": " between memorized and not memorized samples in Figure 5b and Figure 5c. This overlap occurs" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 395, + 82, + 504, + 265 + ], + "blocks": [ + { + "bbox": [ + 395, + 82, + 504, + 265 + ], + "lines": [ + { + "bbox": [ + 395, + 82, + 504, + 265 + ], + "spans": [ + { + "bbox": [ + 395, + 82, + 504, + 265 + ], + "type": "image", + "image_path": "34f4d20f2143cb66db99880965c231ec7e5217f10292416227f7ce4de06ef45c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 381, + 269, + 505, + 310 + ], + "lines": [ + { + "bbox": [ + 381, + 269, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 381, + 269, + 505, + 310 + ], + "type": "text", + "content": "Figure 4: Training a diffusion model on a von Mises mixture. (Top) Ground truth manifold and the associated distribution. (Bottom) Model-generated samples with their LID estimates." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 108, + 557, + 233, + 594 + ], + "blocks": [ + { + "bbox": [ + 108, + 557, + 233, + 594 + ], + "lines": [ + { + "bbox": [ + 108, + 557, + 233, + 594 + ], + "spans": [ + { + "bbox": [ + 108, + 557, + 233, + 594 + ], + "type": "image", + "image_path": "f1bd02d17df4bac2d4f5025269f9d1c0dcc5777f5c492d97ce0e49a3f9dfe053.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "lines": [ + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "spans": [ + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "type": "text", + "content": "(a) (Left) Exact and (Middle) reconstructively memorized samples (Top) with their matched CIFAR10 datapoints (Bottom). (Right) Non-memorized samples with low " + }, + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "type": "text", + "content": ", showing " + }, + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 105, + 599, + 504, + 617 + ], + "type": "text", + "content": " can be partially confounded by complexity." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 238, + 557, + 363, + 594 + ], + "blocks": [ + { + "bbox": [ + 238, + 557, + 363, + 594 + ], + "lines": [ + { + "bbox": [ + 238, + 557, + 363, + 594 + ], + "spans": [ + { + "bbox": [ + 238, + 557, + 363, + 594 + ], + "type": "image", + "image_path": "2c3e6549555e05f1df34c0d26b26b0eaa3e0e34ee184210db70faefa64fa6500.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 365, + 558, + 492, + 594 + ], + "blocks": [ + { + "bbox": [ + 365, + 558, + 492, + 594 + ], + "lines": [ + { + "bbox": [ + 365, + 558, + 492, + 594 + ], + "spans": [ + { + "bbox": [ + 365, + 558, + 492, + 594 + ], + "type": "image", + "image_path": "9cc54f7dc435761054893e9e463ae1ef4fc73f92a7609b6e7e2fbdd45a7d02d4.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 111, + 622, + 239, + 698 + ], + "blocks": [ + { + "bbox": [ + 111, + 622, + 239, + 698 + ], + "lines": [ + { + "bbox": [ + 111, + 622, + 239, + 698 + ], + "spans": [ + { + "bbox": [ + 111, + 622, + 239, + 698 + ], + "type": "image", + "image_path": "0a5a9238f8037a101b5717ac3f637e9d6f130d41ca3d8912f87d559dde3a3610.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 135, + 702, + 216, + 712 + ], + "lines": [ + { + "bbox": [ + 135, + 702, + 216, + 712 + ], + "spans": [ + { + "bbox": [ + 135, + 702, + 216, + 712 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 135, + 702, + 216, + 712 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 135, + 702, + 216, + 712 + ], + "type": "text", + "content": " for StyleGAN samples." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 242, + 623, + 369, + 697 + ], + "blocks": [ + { + "bbox": [ + 242, + 623, + 369, + 697 + ], + "lines": [ + { + "bbox": [ + 242, + 623, + 369, + 697 + ], + "spans": [ + { + "bbox": [ + 242, + 623, + 369, + 697 + ], + "type": "image", + "image_path": "f28a23e292dc09c480a0f2706ab2ba9c1f2eaeb70e353eec72dda361cec8374a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 267, + 702, + 342, + 712 + ], + "lines": [ + { + "bbox": [ + 267, + 702, + 342, + 712 + ], + "spans": [ + { + "bbox": [ + 267, + 702, + 342, + 712 + ], + "type": "text", + "content": "(c) " + }, + { + "bbox": [ + 267, + 702, + 342, + 712 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 267, + 702, + 342, + 712 + ], + "type": "text", + "content": " for iDDPM samples." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 370, + 622, + 498, + 697 + ], + "blocks": [ + { + "bbox": [ + 370, + 622, + 498, + 697 + ], + "lines": [ + { + "bbox": [ + 370, + 622, + 498, + 697 + ], + "spans": [ + { + "bbox": [ + 370, + 622, + 498, + 697 + ], + "type": "image", + "image_path": "65f041239c6d50c035226b77e933c6d78b6d43f42328bf0db5043e35d6e269ac.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 392, + 702, + 476, + 712 + ], + "lines": [ + { + "bbox": [ + 392, + 702, + 476, + 712 + ], + "spans": [ + { + "bbox": [ + 392, + 702, + 476, + 712 + ], + "type": "text", + "content": "(d) " + }, + { + "bbox": [ + 392, + 702, + 476, + 712 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_*" + }, + { + "bbox": [ + 392, + 702, + 476, + 712 + ], + "type": "text", + "content": " for CIFAR10 datapoints." 
+ } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 160, + 719, + 449, + 730 + ], + "lines": [ + { + "bbox": [ + 160, + 719, + 449, + 730 + ], + "spans": [ + { + "bbox": [ + 160, + 719, + 449, + 730 + ], + "type": "text", + "content": "Figure 5: Visualizing OD-Mem and DD-Mem on StyleGAN2-ADA and iDDPM trained on CIFAR10." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 236, + 155 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 236, + 155 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 236, + 155 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 236, + 155 + ], + "type": "image", + "image_path": "4c2dedfd3d67d8f76ece9750b034e4ee64d3d5a7cdbb6d9b052fc8ed6b01f63c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 157, + 160, + 183, + 170 + ], + "lines": [ + { + "bbox": [ + 157, + 160, + 183, + 170 + ], + "spans": [ + { + "bbox": [ + 157, + 160, + 183, + 170 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 157, + 160, + 183, + 170 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": 
"image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 241, + 79, + 370, + 156 + ], + "blocks": [ + { + "bbox": [ + 241, + 79, + 370, + 156 + ], + "lines": [ + { + "bbox": [ + 241, + 79, + 370, + 156 + ], + "spans": [ + { + "bbox": [ + 241, + 79, + 370, + 156 + ], + "type": "image", + "image_path": "51370705eadde98ac02dc7d0e4c6c4553722b9b4d3305d7ab3c10f4b32213880.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 283, + 160, + 326, + 170 + ], + "lines": [ + { + "bbox": [ + 283, + 160, + 326, + 170 + ], + "spans": [ + { + "bbox": [ + 283, + 160, + 326, + 170 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 283, + 160, + 326, + 170 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}(\\cdot \\mid c)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 376, + 80, + 504, + 156 + ], + "blocks": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "lines": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "type": "image", + "image_path": "a5c00433540992056406ac9c19a0c94a274fc0cb6ab5fcedc5bcfdc109b331b5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 161, + 466, + 169 + ], + "lines": [ + { + "bbox": [ + 413, + 161, + 466, + 169 + ], + "spans": [ + { + "bbox": [ + 413, + 161, + 466, + 169 + ], + "type": "text", + "content": "(c) CFG vector norm." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 184, + 175, + 424, + 185 + ], + "lines": [ + { + "bbox": [ + 184, + 175, + 424, + 185 + ], + "spans": [ + { + "bbox": [ + 184, + 175, + 424, + 185 + ], + "type": "text", + "content": "Figure 6: Density histograms for each memorization metric across different datasets." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 195, + 506, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 195, + 506, + 229 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 506, + 229 + ], + "type": "text", + "content": "because image complexity serves as a confounding factor: images with simple backgrounds and textures may be assigned low " + }, + { + "bbox": [ + 104, + 195, + 506, + 229 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 195, + 506, + 229 + ], + "type": "text", + "content": " values, not due to memorization, but simply because of their inherent simplicity. We discuss this issue further, along with a partial solution, in Appendix C.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 233, + 506, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 233, + 506, + 323 + ], + "spans": [ + { + "bbox": [ + 104, + 233, + 506, + 323 + ], + "type": "text", + "content": "Stable Diffusion on Large-Scale Image Datasets Here, we set " + }, + { + "bbox": [ + 104, + 233, + 506, + 323 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 233, + 506, + 323 + ], + "type": "text", + "content": " to Stable Diffusion v1.5 (Rombach et al., 2022). Taking inspiration from the benchmark of Wen et al. (2023), we retrieve memorized LAION (Schuhmann et al., 2022) training images identified by Webster (2023). We focus on the 86 memorized images categorized as \"matching verbatim\", noting that the other categories of Webster (2023) consist of large numbers of captions that generate samples matching a small set of training images. 
For non-memorized images, we use a mix of 2000 images sampled from LAION Aesthetics 6.5+, 2000 sampled from COCO (Lin et al., 2014), and all 251 images from the Tuxemon dataset (Tuxemon Project, 2024; Hugging Face, 2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": "To our knowledge, no estimator of " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " scales to images at the size of Stable Diffusion; we thus omit these from our analysis. FLIPD is the only " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " estimator that remains tractable at this scale, so we use it for this analysis. Note that Stable Diffusion provides two model distributions: the unconditional distribution " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " and the conditional distribution " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x|c)" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " is the image's caption. 
Hence, we compute both " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}(\\cdot |c)" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " for each of the aforementioned images. Additionally, we compute the norm of the CFG vector, which was proposed as a memorization detection method by Wen et al. (2023) and which we argued varies inversely to " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": " in Section 3. Our experiments thus cover three proxies for local intrinsic dimension: " + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}, \\widehat{\\mathrm{LID}}_{\\theta}(\\cdot |c)" + }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "text", + "content": ", and the CFG vector norm (see Appendix C for details). The density histograms of all these values are depicted in Figure 6." 
+ }, + { + "bbox": [ + 104, + 327, + 506, + 433 + ], + "type": "inline_equation", + "content": "^4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "text", + "content": "We see that all proxies for " + }, + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "text", + "content": " assign relatively small LID values to memorized images, further validating the MMH. Due to the unavailability of " + }, + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}" + }, + { + "bbox": [ + 104, + 436, + 506, + 515 + ], + "type": "text", + "content": " estimates, it is hard to distinguish between DD-Mem and OD-Mem here. In Figure 6, low conditional or unconditional LID as well as high CFG vector norms are all signals of memorization, strengthening our argument in Section 3. While the CFG vector norm seemingly provides the strongest signal, the unconditional LID detects memorization well despite lacking caption information. Detecting memorized training images without the corresponding captions is a novel capability, and notably cannot be done with the CFG vector norm technique." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 529, + 362, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 362, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 362, + 540 + ], + "type": "text", + "content": "4.2 MITIGATING MEMORIZATION BY CONTROLLING " + }, + { + "bbox": [ + 105, + 529, + 362, + 540 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "content": "In this section we study the problem of sample-time mitigation through the lens of the MMH. Somepalli et al. (2023b) establish text-conditioning as a crucial driver of memorization in Stable Diffusion, where specific tokens in the prompt often cause the model to generate replicas of training images. Wen et al. (2023) introduce a differentiable metric, which we denote as " + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "content": " (formally defined in Appendix D), which is based on the accumulated CFG vector norm while sampling an image. Wen et al. (2023) observe that this metric shows a sharp increase when the prompt " + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "content": " leads to the generation of memorized images. 
Since " + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "content": " is differentiable with respect to " + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 550, + 506, + 650 + ], + "type": "text", + "content": ", Wen et al. (2023) backpropagate through this metric and find the tokens with the largest gradient magnitude, essentially providing token attributions for memorization." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "content": "Here we make two contributions. First, we propose two additional metrics, " + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{s_{\\theta}^{\\mathrm{CFG}}}(c)" + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "content": ", which are modifications of " + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "content": " to use " + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "inline_equation", + "content": "\\| s_{\\theta}^{\\mathrm{CFG}}(x;t,c)\\|" + }, + { + "bbox": [ + 104, + 654, + 506, + 681 + ], + "type": "text", + "content": " or FLIPD, respectively, instead of the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { 
+ "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 690, + 506, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 690, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 690, + 506, + 733 + ], + "type": "text", + "content": "4The LID estimates provided by FLIPD are sometimes negative in value; Kamkari et al. (2024b) justify this as an artifact of estimating the LID using a UNet. Despite underestimating LID in absolute terms, Kamkari et al. (2024b) confirm that FLIPD ranks " + }, + { + "bbox": [ + 104, + 690, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 690, + 506, + 733 + ], + "type": "text", + "content": " estimates correctly, which is sufficient for the purpose of distinguishing memorized from non-memorized examples." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 93, + 252, + 200 + ], + "blocks": [ + { + "bbox": [ + 107, + 93, + 252, + 200 + ], + "lines": [ + { + "bbox": [ + 107, + 93, + 252, + 200 + ], + "spans": [ + { + "bbox": [ + 107, + 93, + 252, + 200 + ], + "type": "image", + "image_path": "693b9a67ea2e6084e8a5628520a2963fc56720fded7e8d8c54815dba2ea49ee4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 137, + 205, + 235, + 213 + ], + "lines": [ + { + "bbox": [ + 137, + 205, + 235, + 213 + ], + "spans": [ + { + "bbox": [ + 137, + 205, + 235, + 213 + ], + "type": "text", + "content": "(a) Analysis of the mitigation approach." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 265, + 86, + 504, + 132 + ], + "blocks": [ + { + "bbox": [ + 265, + 86, + 504, + 132 + ], + "lines": [ + { + "bbox": [ + 265, + 86, + 504, + 132 + ], + "spans": [ + { + "bbox": [ + 265, + 86, + 504, + 132 + ], + "type": "image", + "image_path": "7cb82521ab5f7f4e7f26d836c7a238df117edea7ed126086d88e293479f8bc3d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 270, + 133, + 329, + 198 + ], + "blocks": [ + { + "bbox": [ + 270, + 133, + 329, + 198 + ], + "lines": [ + { + "bbox": [ + 270, + 133, + 329, + 198 + ], + "spans": [ + { + "bbox": [ + 270, + 133, + 329, + 198 + ], + "type": "image", + "image_path": "e16a39f80997bb33dd2be408010faeefce2345eea0a3d2920959f78ea8de9d13.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 161, + 220, + 448, + 229 + ], + "lines": [ + { + "bbox": [ + 161, + 220, + 448, + 229 + ], + "spans": [ + { + "bbox": [ + 161, + 220, + 448, + 229 + ], + "type": "text", + "content": "Figure 7: Using token attributions to detect drivers of memorization and to mitigate it at sample time." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 330, + 133, + 387, + 198 + ], + "blocks": [ + { + "bbox": [ + 330, + 133, + 387, + 198 + ], + "lines": [ + { + "bbox": [ + 330, + 133, + 387, + 198 + ], + "spans": [ + { + "bbox": [ + 330, + 133, + 387, + 198 + ], + "type": "image", + "image_path": "cfcbb3999a906a22576b2542e04b61ab6faa244dae8ace9039eec1b7f0f9cb4a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 271, + 205, + 496, + 213 + ], + "lines": [ + { + "bbox": [ + 271, + 205, + 496, + 213 + ], + "spans": [ + { + "bbox": [ + 271, + 205, + 496, + 213 + ], + "type": "text", + "content": "(b) Comparing (normalized) token attributions for a memorized prompt using three methods." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 388, + 134, + 445, + 198 + ], + "blocks": [ + { + "bbox": [ + 388, + 134, + 445, + 198 + ], + "lines": [ + { + "bbox": [ + 388, + 134, + 445, + 198 + ], + "spans": [ + { + "bbox": [ + 388, + 134, + 445, + 198 + ], + "type": "image", + "image_path": "7ee43ec47299bd473868b42bcac00ca01c601787217d1874bb525e1a300428dc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 446, + 133, + 503, + 198 + ], + "blocks": [ + { + "bbox": [ + 446, + 133, + 503, + 198 + ], + "lines": [ + { + "bbox": [ + 446, + 133, + 503, + 198 + ], + "spans": [ + { + "bbox": [ + 446, + 133, + 503, + 198 + ], + "type": "image", + "image_path": "03cd7c18ee2c02c6a77a92dd6c633a6de44ed7d4867ee0f27813d9933ae95fcf.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 104, 
+ 247, + 506, + 336 + ], + "type": "text", + "content": "norm of the CFG vector. We define these metrics fully in Appendix D due to space limitations. Since both of these new metrics are also differentiable with respect to " + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "text", + "content": " can be trivially replaced by either of them in the method of Wen et al. (2023). Second, we propose an automated way to use the token attributions from this method into a sample-time mitigation scheme. We start by normalizing the attributions across the tokens, and sample " + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "text", + "content": " tokens based on a categorical distribution parameterized by these normalized attributions. We then use GPT-4 (OpenAI, 2023) to rephrase the caption, keeping it semantically similar but perturbing the selected " + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 247, + 506, + 336 + ], + "type": "text", + "content": " tokens that are highly contributing to the memorization metric (see Appendix D.4 for details)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "text", + "content": "The bottom panel in Figure 7b shows four images: a training image corresponding to the prompt \"The Great Wave off Kanagawa by Katsushika Hokusai\", a generated image using the same prompt showing clear memorization, a generated image obtained with our mitigation scheme with " + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "text", + "content": ", and another generated image using " + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "text", + "content": " instead. Qualitatively, using FLIPD or the norm of the CFG vector perform on par with each other. The top panel of Figure 7b shows the token attributions obtained from " + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 341, + 506, + 407 + ], + "type": "text", + "content": " are sensible. See Appendix D.5 for additional results." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "text", + "content": "We present quantitative comparisons in Figure 7a by analyzing the average CLIP score (higher is better) (Radford et al., 2021) and SSCD similarity (lower is better) over matching prompts, varying " + }, + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "inline_equation", + "content": "k \\in \\{1,2,3,4,6,8\\}" + }, + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "text", + "content": ", with 5 repetitions for each prompt. As " + }, + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 412, + 504, + 479 + ], + "type": "text", + "content": " increases, both SSCD and CLIP scores decrease across methods. We also include an ablation where the modified tokens are selected uniformly at random, ignoring attributions. All attribution-based methods achieve lower similarity while maintaining a relatively higher CLIP score than uniform token selection." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "text", + "content": "Overall, the results in Figure 7 provide further evidence supporting the MMH, both by showing that encouraging samples to have higher LID can help prevent memorization, and by further confirming the relationship between the CFG vector norm, the CFG-adjusted score norm, and LID established in Section 3. We hypothesize that our results can likely be improved by more efficiently guiding generated samples towards regions of high LID, but highlight that doing so is not trivial. 
For example, in Appendix D.3 we find that optimizing " + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "text", + "content": " for large values of " + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "text", + "content": " (inspired by the inference-time mitigation method of (Wen et al., 2023)) during sampling can fail by producing samples with chaotic textures that have artificially high " + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 483, + 506, + 573 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 594, + 212, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 212, + 607 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 212, + 607 + ], + "type": "text", + "content": "5 RELATED WORK" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": "Detecting and Preventing Memorization for Image Models The task of surfacing memorized samples is well-studied. 
Consensus in the literature is that " + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": " distance to the nearest training sample in pixel space is a poor detector of memorized samples (Carlini et al., 2023), but that recalibrating the " + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 621, + 506, + 732 + ], + "type": "text", + "content": " distance according to the local concentration of the dataset works better for smaller datasets (Yoon et al., 2023; Stein et al., 2023), and that using retrieval techniques such as distance in SSD feature space (Pizzi et al., 2022) works better still, especially for more complex, higher-resolution images (Somepalli et al., 2023a). However, all of these retrieval techniques are too expensive to be used to withhold samples from a live model. To more efficiently prevent memorized samples from being generated, past and concurrent works have altered the sampling procedure, training procedure, or the model itself (Wen et al., 2023; Daras et al., 2024; Chen et al., 2024; Hintersdorf et al., 2024)." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 193 + ], + "type": "text", + "content": "Explaining Memorization There is an active community effort attempting to explain why and how memorization occurs in DGMs. Early studies focused on GANs, and have taken both theoretical (Nagarajan et al., 2018) and empirical (Bai et al., 2021) perspectives. However, GANs are thought to be less prone to memorization than DMs (Akbar et al., 2025), except on small datasets (Feng et al., 2021). Several works on DMs (Pidstrigach, 2022; Yi et al., 2023; Gu et al., 2023; Li et al., 2024) have pointed out that, given sufficient capacity, DMs at optimality are capable of learning the empirical training distribution, which is complete memorization. Others have focused on generalization, showing that DMs are capable of generalizing well in theory (Li et al., 2023), have inductive biases towards generating photorealistic images (Kadkhodaie et al., 2024), and will generalize when their capacity is insufficient to memorize (Yoon et al., 2023)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": "DGM-Based LID Estimation As opposed to statistical LID estimators (e.g., Levina & Bickel (2004)), which are constructed to estimate the dimension of " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ", DGM-based ones estimate the dimensionality of " + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 205, + 506, + 350 + ], + "type": "text", + "content": ", the manifold learned by a DGM. These types of estimators are available for many types of DGMs, and in addition to being useful for memorization, have found utility in out-of-distribution detection (Kamkari et al., 2024a). In the literature, LID estimators for normalizing flows (Dinh et al., 2014) have been proposed using the singular values of their Jacobians (Horvat & Pfister, 2022; Kamkari et al., 2024a) or their density estimates (Tempczyk et al., 2022). In Section 4 we applied the singular value method to obtain LID estimates for GANs. Dai & Wipf (2019) and Zheng et al. (2022) proposed estimators for VAEs (Kingma & Welling, 2014; Rezende et al., 2014) using the structure of their posterior distribution. Several authors have proposed estimators for DMs as well (Stanczuk et al., 2024; Kamkari et al., 2024b; Horvat & Pfister, 2024); we focus on those of Stanczuk et al. (2024) and Kamkari et al. (2024b) because they work with off-the-shelf DMs and do not require modifying the training procedure." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 361, + 387, + 373 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 387, + 373 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 387, + 373 + ], + "type": "text", + "content": "6 CONCLUSIONS, LIMITATIONS, AND FUTURE WORK" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 381, + 506, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 381, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 381, + 506, + 471 + ], + "type": "text", + "content": "Throughout this work, we have drawn connections between the geometry of a DGM and its propensity to memorize through the MMH. First, we showed that the notion of LID provides a systematic way of understanding different types of memorization. Second, we explained how memorization phenomena described by prior work can be understood from the perspective of LID. Third, we verified the MMH empirically across scales of data and classes of models. Fourth, we showed that controlling " + }, + { + "bbox": [ + 104, + 381, + 506, + 471 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 381, + 506, + 471 + ], + "type": "text", + "content": " is a promising way to mitigate memorization. We offered several connections, including the insight that some instances of memorization in DMs are due to the DM's inability to generalize (OD-Mem), whereas others are due to low-LID ground truth (DD-Mem)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": "Despite having shown that the MMH is a principled avenue to detect and alleviate memorization, our current approaches can be improved: estimates of " + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": " have some overlap between memorized and non-memorized samples and our sample-time scheme for mitigating memorization using " + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": " performs on par with, but does not outperform, its more ad-hoc variant using " + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 475, + 506, + 575 + ], + "type": "text", + "content": ". We expect future work to find even better ways of leveraging the MMH and LID towards these goals; e.g. by improving LID estimation or more efficiently controlling LID during sampling. Finally, although the manifold hypothesis does not apply directly to discrete data such as language, some intuitions described in this work carry over, and generalizations or parallels to the concepts here may offer insights for the language-modelling space." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 675 + ], + "type": "text", + "content": "Reproducibility Statement To ensure the reproducibility of our experiments, we provide two codebase links. The first codebase, accessible at github.com/layer6ai-labs/dgm-geometry, contains our small-scale synthetic experiments and our CIFAR10 experiments. The second, accessible at github.com/layer6ai-labs/diffusion_memorization/, extends the work of Wen et al. (2023) to use the MMH to detect and mitigate memorization. Comprehensive details of our experimental setup are provided across Section 4, Appendix C, and Appendix D. All datasets used in our experiments are freely available from the referenced sources and are utilized in compliance with their respective licenses." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Ethics Statement We do not foresee any ethical concerns with this research. The overarching topic, memorization in generative models, is widely studied to better understand safety concerns associated with using and deploying such models. Our goal is to theoretically explain and to empirically detect and alleviate this phenomenon; we do not promote the use of these models for harmful practices." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 201, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 101, + 397, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 101, + 397, + 114 + ], + "spans": [ + { + "bbox": [ + 105, + 101, + 397, + 114 + ], + "type": "text", + "content": "We thank Kin Kwan Leung for his valuable help in proving Lemma E.4." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 129, + 176, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 129, + 176, + 141 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 176, + 141 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 148, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 105, + 148, + 505, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 505, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 505, + 182 + ], + "type": "text", + "content": "Muhammad Usman Akbar, Wuhao Wang, and Anders Eklund. Beware of diffusion models for synthesizing medical images-a comparison with gans in terms of memorizing brain mri and chest x-ray images. Machine Learning: Science and Technology, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 188, + 506, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 188, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 105, + 188, + 506, + 233 + ], + "type": "text", + "content": "Ching-Yuan Bai, Hsuan-Tien Lin, Colin Raffel, and Wendy Chi-wen Kan. On training sample memorization: Lessons from benchmarking generative modeling with a large-scale competition. In Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery & Data Mining, pp. 2534-2542, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "type": "text", + "content": "Yoshua Bengio, Aaron Courville, and Pascal Vincent. Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(8):1798-1828, 2013." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 280, + 506, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 506, + 304 + ], + "type": "text", + "content": "Robi Bhattacharjee, Sanjoy Dasgupta, and Kamalika Chaudhuri. Data-copying in generative models: a formal framework. In International Conference on Machine Learning, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 309, + 427, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 309, + 427, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 309, + 427, + 323 + ], + "type": "text", + "content": "G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 328, + 505, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 505, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 505, + 363 + ], + "type": "text", + "content": "Bradley CA Brown, Anthony L Caterini, Brendan Leigh Ross, Jesse C Cresswell, and Gabriel Loaiza-Ganem. Verifying the union of manifolds hypothesis for image data. In International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 369, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 369, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 369, + 505, + 403 + ], + "type": "text", + "content": "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. Quantifying memorization across neural language models. In *The Eleventh International Conference on Learning Representations*, 2022." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 410, + 505, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 505, + 444 + ], + "type": "text", + "content": "Nicolas Carlini, Jamie Hayes, Milad Nasr, Matthew Jagielski, Vikash Sehwag, Florian Tramer, Borja Balle, Daphne Ippolito, and Eric Wallace. Extracting training data from diffusion models. In 32nd USENIX Security Symposium, pp. 5253-5270, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 450, + 504, + 474 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 450, + 504, + 474 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 504, + 474 + ], + "type": "text", + "content": "Chen Chen, Daochang Liu, and Chang Xu. Towards memorization-free diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 479, + 504, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 479, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 479, + 504, + 503 + ], + "type": "text", + "content": "Bin Dai and David Wipf. Diagnosing and enhancing VAE models. In International Conference on Learning Representations, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 509, + 505, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 509, + 505, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 509, + 505, + 544 + ], + "type": "text", + "content": "Giannis Daras, Alex Dimakis, and Constantinos Costis Daskalakis. Consistent Diffusion Meets Tweedie: Training Exact Ambient Diffusion Models with Noisy Data. In International Conference on Machine Learning, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 504, + 573 + ], + "type": "text", + "content": "Laurent Dinh, David Krueger, and Yoshua Bengio. NICE: Non-linear independent components estimation. arXiv:1410.8516, 2014." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 579, + 505, + 614 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 505, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 505, + 614 + ], + "type": "text", + "content": "Qianli Feng, Chenqi Guo, Fabian Benitez-Quiroz, and Aleix M Martinez. When do GANs replicate? on the choice of dataset size. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 6701-6710, 2021." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 620, + 458, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 458, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 458, + 633 + ], + "type": "text", + "content": "G.B. Folland. Real Analysis: Modern Techniques and Their Applications. Wiley, 2013." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 639, + 506, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 506, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 506, + 662 + ], + "type": "text", + "content": "Keinosuke Fukunaga and David R Olsen. An algorithm for finding intrinsic dimensionality of data. IEEE Transactions on Computers, 100(2):176-183, 1971." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 668, + 506, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 506, + 703 + ], + "type": "text", + "content": "Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, volume 27, 2014." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 504, + 732 + ], + "type": "text", + "content": "Xiangming Gu, Chao Du, Tianyu Pang, Chongxuan Li, Min Lin, and Ye Wang. On memorization in diffusion models. arXiv:2310.02664, 2023." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + 
"type": "text", + "content": "Dominik Hintersdorf, Lukas Struppek, Kristian Kersting, Adam Dziedzic, and Franziska Boenisch. Finding NeMo: Localizing Neurons Responsible For Memorization in Diffusion Models. arXiv:2406.02366, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 121, + 505, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 121, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 121, + 505, + 146 + ], + "type": "text", + "content": "Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Advances in Neural Information Processing Systems, volume 33, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 506, + 176 + ], + "type": "text", + "content": "Christian Horvat and Jean-Pascal Pfister. Intrinsic dimensionality estimation using normalizing flows. In Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 180, + 506, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 180, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 105, + 180, + 506, + 215 + ], + "type": "text", + "content": "Christian Horvat and Jean-Pascal Pfister. On gauge freedom, conservativity and intrinsic dimensionality estimation in diffusion models. In *The Twelfth International Conference on Learning Representations*, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 220, + 498, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 220, + 498, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 220, + 498, + 235 + ], + "type": "text", + "content": "Hugging Face. Tuxemon. https://huggingface.co/datasets/diffusers/tuxemon, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 239, + 506, + 274 + ], + "type": "text", + "content": "Ahmed Imtiaz Humayun, Ibtihel Amara, Candice Schumann, Golnoosh Farnadi, Negar Rostamzadeh, and Mohammad Havaei. Understanding the local geometry of generative model manifolds. arXiv e-prints, pp. arXiv-2408, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 280, + 506, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 506, + 313 + ], + "type": "text", + "content": "Michael F Hutchinson. A stochastic estimator of the trace of the influence matrix for laplacian smoothing splines. Communications in Statistics-Simulation and Computation, 18(3):1059-1076, 1989." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 319, + 506, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 506, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 506, + 354 + ], + "type": "text", + "content": "Zahra Kadkhodaie, Florentin Guth, Eero P Simoncelli, and Stéphane Mallat. Generalization in diffusion models arises from geometry-adaptive harmonic representation. In International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 360, + 506, + 396 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 360, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 105, + 360, + 506, + 396 + ], + "type": "text", + "content": "Hamidreza Kamkari, Brendan Leigh Ross, Jesse C Cresswell, Anthony L Caterini, Rahul G Krishnan, and Gabriel Loaiza-Ganem. A geometric explanation of the likelihood OOD detection paradox. In International Conference on Machine Learning, 2024a." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 105, + 400, + 506, + 436 + ], + "type": "text", + "content": "Hamidreza Kamkari, Brendan Leigh Ross, Rasa Hosseinzadeh, Jesse C Cresswell, and Gabriel Loaiza-Ganem. A geometric view of data complexity: Efficient local intrinsic dimension estimation with diffusion models. In Advances in Neural Information Processing Systems, volume 37, 2024b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 441, + 505, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 505, + 476 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 505, + 476 + ], + "type": "text", + "content": "Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 481, + 505, + 516 + ], + "type": "text", + "content": "Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. In Advances in Neural Information Processing Systems, volume 33, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 521, + 505, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 505, + 545 + ], + "type": "text", + "content": "Diederik P Kingma and Max Welling. Auto-encoding variational Bayes. In International Conference on Learning Representations, 2014." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 551, + 506, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 551, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 551, + 506, + 574 + ], + "type": "text", + "content": "Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical Report, 2009." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 580, + 408, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 580, + 408, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 580, + 408, + 594 + ], + "type": "text", + "content": "John M Lee. Introduction to Smooth Manifolds. Springer New York, 2012." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 598, + 505, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 598, + 505, + 623 + ], + "spans": [ + { + "bbox": [ + 105, + 598, + 505, + 623 + ], + "type": "text", + "content": "Elizaveta Levina and Peter Bickel. Maximum likelihood estimation of intrinsic dimension. In Advances in Neural Information Processing Systems, 2004." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 505, + 651 + ], + "type": "text", + "content": "Puheng Li, Zhong Li, Huishuai Zhang, and Jiang Bian. On the generalization properties of diffusion models. Advances in Neural Information Processing Systems, 36, 2023." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 657, + 506, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 506, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 506, + 681 + ], + "type": "text", + "content": "Sixu Li, Shi Chen, and Qin Li. A good score does not lead to a good generative model. arXiv:2401.04856, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 687, + 506, + 732 + ], + "type": "text", + "content": "Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dólar, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pp. 740-755. Springer, 2014." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Gabriel Loaiza-Ganem, Brendan Leigh Ross, Jesse C Cresswell, and Anthony L. Caterini. Diagnosing and fixing manifold overfitting in deep generative models. Transactions on Machine Learning Research, 2022." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 158 + ], + "type": "text", + "content": "Gabriel Loaiza-Ganem, Brendan Leigh Ross, Rasa Hosseinzadeh, Anthony L Caterini, and Jesse C Cresswell. Deep generative models through the lens of the manifold hypothesis: A survey and new connections. Transactions on Machine Learning Research, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 504, + 187 + ], + "type": "text", + "content": "Yubin Lu, Zhongjian Wang, and Guillaume Bal. Mathematical analysis of singularities in the diffusion model under the submanifold assumption. arXiv:2301.07882, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 194, + 505, + 228 + ], + "type": "text", + "content": "Casey Meehan, Kamalika Chaudhuri, and Sanjoy Dasgupta. A non-parametric test to detect data-copying in generative models. In International Conference on Artificial Intelligence and Statistics, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 236, + 504, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 504, + 259 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 504, + 259 + ], + "type": "text", + "content": "Vaishnavh Nagarajan, Colin Raffel, and Ian J Goodfellow. Theoretical insights into memorization in gans. In Neural Information Processing Systems Workshop, volume 1, pp. 3, 2018." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 266, + 505, + 300 + ], + "type": "text", + "content": "Alex Nichol, Aditya Ramesh, Pamela Mishkin, Prafulla Dariwal, Joanne Jang, and Mark Chen. DALL-E 2 Pre-Training Mitigations, June 2022. URL https://openai.com/blog/dall-e-2-pre-training-mitigations/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 505, + 331 + ], + "type": "text", + "content": "Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International conference on machine learning, pp. 8162-8171. PMLR, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 337, + 334, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 337, + 334, + 351 + ], + "spans": [ + { + "bbox": [ + 105, + 337, + 334, + 351 + ], + "type": "text", + "content": "OpenAI. Gpt-4. https://openai.com, 2023. GPT-4o." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 505, + 380 + ], + "type": "text", + "content": "William H. Orrick. Andersen v. Stability AI Ltd., 2023. URL https://casetext.com/case/ andersen-v-stability-ai-ltd." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 387, + 505, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 387, + 505, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 387, + 505, + 411 + ], + "type": "text", + "content": "Jakiw Pidstrigach. Score-based generative models detect manifolds. 
In Advances in Neural Information Processing Systems, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 417, + 505, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 505, + 453 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 505, + 453 + ], + "type": "text", + "content": "Ed Pizzi, Sreya Dutta Roy, Sugosh Nagavara Ravindra, Priya Goyal, and Matthijs Douze. A self-supervised descriptor for image copy detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14532-14542, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 459, + 505, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 459, + 505, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 459, + 505, + 503 + ], + "type": "text", + "content": "Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 510, + 505, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 505, + 545 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 505, + 545 + ], + "type": "text", + "content": "Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. Stochastic backpropagation and approximate inference in deep generative models. In International Conference on Machine Learning, pp. 1278-1286, 2014." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 552, + 505, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 552, + 505, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 552, + 505, + 586 + ], + "type": "text", + "content": "Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 593, + 505, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 593, + 505, + 628 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 505, + 628 + ], + "type": "text", + "content": "Brendan Leigh Ross, Gabriel Loaiza-Ganem, Anthony L Caterini, and Jesse C Cresswell. Neural implicit manifold learning for topology-aware generative modelling. Transactions on Machine Learning Research, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 634, + 505, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 505, + 692 + ], + "type": "text", + "content": "Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, Patrick Schramowski, Srivatsa Kundurthy, Katherine Crowson, Ludwig Schmidt, Robert Kaczmarczyk, and Jenia Jitsev. LAION-5B: An open large-scale dataset for training next generation image-text models. In Advances in Neural Information Processing Systems, volume 35, 2022." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 505, + 732 + ], + "type": "text", + "content": "Jascha Sohl-Dickstein, Eric Weiss, Niru Maheswaranathan, and Surya Ganguli. Deep unsupervised learning using nonequilibrium thermodynamics. In International Conference on Machine Learning, pp. 2256-2265, 2015." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 651 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 116 + ], + "type": "text", + "content": "Gowthami Somepalli, Vasu Singla, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Diffusion art or digital forgery? investigating data replication in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023a." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Gowthami Somepalli, Vasu Singla, Micah Goldblum, Jonas Geiping, and Tom Goldstein. Understanding and mitigating copying in diffusion models. In Advances in Neural Information Processing Systems, volume 36, 2023b." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 505, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 505, + 187 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 505, + 187 + ], + "type": "text", + "content": "Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv:2010.02502, October 2020. URL https://arxiv.org/abs/2010.02502." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 193, + 505, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 193, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 105, + 193, + 505, + 228 + ], + "type": "text", + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 233, + 505, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 233, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 105, + 233, + 505, + 269 + ], + "type": "text", + "content": "Jan Pawel Stanczuk, Georgios Batzolis, Teo Deveney, and Carola-Bibiane Schonlieb. Diffusion models encode the intrinsic dimension of data manifolds. In Proceedings of the 41st International Conference on Machine Learning, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 274, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 274, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 274, + 505, + 320 + ], + "type": "text", + "content": "George Stein, Jesse C Cresswell, Rasa Hosseinzadeh, Yi Sui, Brendan Ross, Valentin Villecloze, Zhaoyan Liu, Anthony L Caterini, J Eric T Taylor, and Gabriel Loaiza-Ganem. Exposing flaws of generative model evaluation metrics and their unfair treatment of diffusion models. In Advances in Neural Information Processing Systems, volume 36, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 327, + 505, + 361 + ], + "type": "text", + "content": "Piotr Tempczyk, Rafal Michaluk, Lukasz Garncarek, Przemysław Spurek, Jacek Tabor, and Adam Golinski. LIDL: Local intrinsic dimension estimation using approximate likelihood. In International Conference on Machine Learning, pp. 21205-21231, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 367, + 381, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 381, + 380 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 381, + 380 + ], + "type": "text", + "content": "Tuxemon Project. Tuxemon, 2024. URL https://tuxemon.org/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 505, + 410 + ], + "type": "text", + "content": "Arash Vahdat, Karsten Kreis, and Jan Kautz. Score-based generative modeling in latent space. In Advances in Neural Information Processing Systems, volume 34, 2021." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 416, + 505, + 440 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 416, + 505, + 440 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 505, + 440 + ], + "type": "text", + "content": "Nikhil Vyas, Sham M Kakade, and Boaz Barak. On provable copyright protection for generative models. In International Conference on Machine Learning, pp. 35277-35299. PMLR, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 505, + 469 + ], + "type": "text", + "content": "Ryan Webster. A reproducible extraction of training images from diffusion models. arXiv:2305.08694, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 476, + 505, + 500 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 476, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 105, + 476, + 505, + 500 + ], + "type": "text", + "content": "Ryan Webster, Julien Rabin, Loic Simon, and Frederic Jurie. This person (probably) exists. Identity membership attacks against GAN generated faces. arXiv:2107.06018, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 505, + 540 + ], + "type": "text", + "content": "Yuxin Wen, Yuchen Liu, Chen Chen, and Lingjuan Lyu. Detecting, explaining, and mitigating memorization in diffusion models. In The Twelfth International Conference on Learning Representations, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 505, + 570 + ], + "type": "text", + "content": "Mingyang Yi, Jiacheng Sun, and Zhenguo Li. On the generalization of diffusion model. arXiv:2305.14712, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 576, + 505, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 505, + 611 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 505, + 611 + ], + "type": "text", + "content": "TaeHo Yoon, Joo Young Choi, Sehyun Kwon, and Ernest K Ryu. Diffusion probabilistic models generalize when they fail to memorize. In ICML 2023 Workshop on Structured Probabilistic Inference & Generative Modeling, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 617, + 505, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 617, + 505, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 617, + 505, + 651 + ], + "type": "text", + "content": "Yijia Zheng, Tong He, Yixuan Qiu, and David P Wipf. Learning manifold dimensions with conditional variational autoencoders. In Advances in Neural Information Processing Systems, volume 35, 2022." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 503, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 503, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 503, + 94 + ], + "type": "text", + "content": "A CONTEXTUALIZING THE MMH WITHIN DEFINITIONS OF MEMORIZATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "text", + "content": "An Overview of Definitions The MMH describes the mechanism through which memorization occurs. How does this mechanism fit into prior definitions of memorization from the literature? 
Formal definitions of memorization generally follow the same template: a point " + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "text", + "content": " is memorized when the model's probability measure " + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "text", + "content": " places too much mass within some distance of " + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 107, + 506, + 295 + ], + "type": "text", + "content": ". Some of these definitions define memorization globally on the level of an entire model (Meehan et al., 2020; Yoon et al., 2023; Gu et al., 2023), while others define memorization locally for individual datapoints (Carlini et al., 2023; Bhattacharjee et al., 2023). The identical definitions of Yoon et al. (2023) and Gu et al. (2023) consider a point to be memorized based purely on a distance threshold; in practice, however, distances alone have been unsuccessful at consistently surfacing what would be perceived by humans as memorized (Somepalli et al., 2023a; Stein et al., 2023). We postulate this is also due to manifold structure; semantically memorized images such as Figure 2 will sit on the same manifold, but may not necessarily be close to each other as measured by distance, even when taken in the latent space of an encoder. Meanwhile, Carlini et al. (2023) take a privacy perspective; their definition considers images memorized if they can be extracted from a model by any means, not just generated by the model. 
In this work we take the perspective that memorized samples are most likely to be problematic when they are generated by a production model, which are often treated as a black-box, so we focus on generation rather than extraction." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": "Links Between Formal Memorization and the MMH For the reasons above, we use here the definition of memorization by Bhattacharjee et al. (2023), who define a point " + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": " as memorized by comparing " + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": " to the ground truth " + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "inline_equation", + "content": "P_*" + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": " in a neighbourhood of " + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 310, + 504, + 344 + ], + "type": "text", + "content": ". We present their definition here:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": "Definition A.1. 
Let " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "P_{*}" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " be the ground truth and model probability measures, respectively. Let " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "\\lambda > 1" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": ". A point " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": "-copy of a training datapoint " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " if there exists a radius " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "r > 0" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " such that the " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": "-dimensional ball " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + 
], + "type": "inline_equation", + "content": "B_r^d(x_0)" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " of radius " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " centred at " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": " satisfies (i) " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "x \\in B_r^d(x_0)" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": ", (ii) " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "P_{\\theta}(B_r^d(x_0)) \\geq \\lambda P_{*}(B_r^d(x_0))" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": ", and (iii) " + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "inline_equation", + "content": "P_{*}(B_r^d(x_0)) \\leq \\gamma" + }, + { + "bbox": [ + 104, + 348, + 506, + 394 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": "The first and third conditions imply that " + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": " is sufficiently close to " + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": " relative to the amount of probability mass in the region " + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "inline_equation", + "content": "(P_{*}(B_{r}(x_{0})))" + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": ", while the second condition implies that the model " + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": " places much more mass in the region compared to the ground truth " + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "inline_equation", + "content": "P_{*}" + }, + { + "bbox": [ + 104, + 403, + 504, + 460 + ], + "type": "text", + "content": ". A natural question about the MMH is whether points satisfying it are also formally memorized by the above definition. The answer is in the negative for DD-Mem (Proposition A.2) and the affirmative for OD-Mem (Theorem A.3)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "content": "Proposition A.2. 
There exist models " + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "content": " that exhibit DD-Mem at " + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "content": ", but do not generate " + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "content": "-copies of " + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 464, + 504, + 487 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": "Proof. 
Choose any ground truth distribution " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "P_{*}" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": " on a manifold " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": " with a low-LID point " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": " (for example, set " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) = 0" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": "). A perfect model will exhibit DD-Mem at " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": ", but for any ball " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "B_r^d(x_0)" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": " containing " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "inline_equation", + "content": "P_{\\theta}(B_r^d(x_0)) = P_{*}(B_r^d(x_0))" + }, + { + "bbox": [ + 104, + 503, + 506, + 539 + ], + "type": "text", + "content": ", violating the second condition of Definition A.1." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": "Since DD-Mem is a consequence of the data distribution having points " + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": " with inherently low " + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0)" + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": ", these memorized points are likely to be generated even when there is no excess probability mass assigned near " + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "inline_equation", + "content": "P_{\\theta}" + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": ", as required in the definition of " + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 554, + 504, + 588 + ], + "type": "text", + "content": "-copies." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": "Theorem A.3 (Informal). 
Suppose " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": " is such that " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": " exhibits OD-Mem at " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": ". Then, for every " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "\\lambda > 1" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": " will generate " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": "-copies of " + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 597, + 504, + 620 + ], + "type": "text", + "content": " with near-certainty." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 637, + 405, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 637, + 405, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 637, + 405, + 649 + ], + "type": "text", + "content": "Proof. 
See Appendix E for the formal statement of the theorem and proof." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 665, + 506, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 731 + ], + "type": "text", + "content": "The MMH thus provides two important pieces of context for the definition of " + }, + { + "bbox": [ + 104, + 665, + 506, + 731 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 665, + 506, + 731 + ], + "type": "text", + "content": "-copies. The first is that Definition A.1 is in some sense incomplete; it does not cover DD-Mem. The second is that OD-Mem can be considered a useful refinement of Definition A.1. While the algorithm given by Bhattacharjee et al. (2023) is intractable at scale, we show in Section 4 that the added strength (in the mathematical sense) of the MMH allows us to flag memorized data more efficiently using only estimators of LID." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 219, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 219, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 219, + 94 + ], + "type": "text", + "content": "B LID ESTIMATION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 323, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 323, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 323, + 118 + ], + "type": "text", + "content": "B.1 LID ESTIMATION WITH DIFFUSION MODELS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 126, + 506, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 126, + 506, + 149 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 506, + 149 + ], + "type": "text", + "content": "As mentioned in the main manuscript, we follow the SDE framework of Song et al. 
(2021) for DMs, where the so-called forward SDE is given by" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 216, + 156, + 505, + 167 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 156, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 216, + 156, + 505, + 167 + ], + "type": "interline_equation", + "content": "\\mathrm {d} x _ {t} = f \\left(x _ {t}, t\\right) \\mathrm {d} t + g (t) \\mathrm {d} W _ {t}, \\quad x _ {0} \\sim p _ {*} (x), \\tag {3}", + "image_path": "03eee66e17c7f315991a922de485053bb5c7b2a2dc1e2ec3194c975581e0517a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "f: \\mathbb{R}^d \\times [0,1] \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "g: [0,1] \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": " are pre-specified functions and " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "W_t" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": " is a Brownian motion on " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": ". 
This process progressively adds noise to data from " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": ", and we denote the distribution of " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "p_t(x_t)" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": ". This process can be reversed in time in the sense that if " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "y_t := x_{1-t}" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "inline_equation", + "content": "y_t" + }, + { + "bbox": [ + 104, + 171, + 504, + 217 + ], + "type": "text", + "content": " obeys the so-called backward SDE," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 146, + 223, + 505, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 223, + 505, + 237 + ], + "spans": [ + { + "bbox": [ + 146, + 223, + 505, + 237 + ], + "type": "interline_equation", + "content": "\\mathrm {d} y _ {t} = \\left[ g ^ {2} (1 - t) \\nabla \\log p _ {1 - t} (y _ {t}) - f (y _ {t}, 1 - t) \\right] \\mathrm {d} t + g (1 - t) \\mathrm {d} \\tilde {W} _ {t}, \\quad y _ {0} \\sim p _ {1}, \\tag {4}", + "image_path": "b1284d9ed44238343793d3aae24aadf4213b8ab85e3e6bb10a7efaacc0ba36fa.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": 
"text", + "content": "where " + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "inline_equation", + "content": "\\tilde{W}_t" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": " is another Brownian motion. DMs aim to learn the (Stein) score function, " + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "inline_equation", + "content": "\\nabla \\log p_t(x_t)" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": ", by approximating it with a neural network " + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "inline_equation", + "content": "s_\\theta : \\mathbb{R}^d \\times (0,1] \\to \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": ". Once the network is trained, " + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "inline_equation", + "content": "s_\\theta(x_t,t) \\approx \\nabla \\log p_t(x_t)" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": " is plugged in Equation 4, and solving the resulting SDE transforms noise into model samples. Below we briefly summarize two existing methods, FLIPD and NB, for approximating " + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_\\theta(x)" + }, + { + "bbox": [ + 104, + 243, + 506, + 300 + ], + "type": "text", + "content": " for a DM." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "content": "FLIPD Kamkari et al. 
(2024b) proposed FLIPD, an estimator of " + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "content": " for DMs. Commonly " + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "content": " is linear in " + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "inline_equation", + "content": "x_{t}" + }, + { + "bbox": [ + 104, + 304, + 504, + 328 + ], + "type": "text", + "content": ", in which case the transition kernel corresponding to the forward SDE is given by" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 224, + 333, + 504, + 346 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 333, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 224, + 333, + 504, + 346 + ], + "type": "interline_equation", + "content": "p _ {t \\mid 0} \\left(x _ {t} \\mid x _ {0}\\right) = \\mathcal {N} \\left(x _ {t}; \\psi (t) x _ {0}, \\sigma^ {2} (t) I _ {d}\\right), \\tag {5}", + "image_path": "a5aba19178868ea84b9946964be08a462d08ffb8f5850ce1c75cd87adafe6672.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "inline_equation", + "content": "\\psi, \\sigma: [0,1] \\to \\mathbb{R}" + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "content": " are known functions which depend on the choices of " + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", 
+ "content": " and " + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 104, + 352, + 504, + 374 + ], + "type": "text", + "content": ", and which can be easily evaluated. For a DM with such a transition kernel, FLIPD is defined as" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 156, + 381, + 505, + 400 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 381, + 505, + 400 + ], + "spans": [ + { + "bbox": [ + 156, + 381, + 505, + 400 + ], + "type": "interline_equation", + "content": "\\operatorname {F L I P D} (x, t _ {0}) = d + \\sigma^ {2} (t _ {0}) \\left(\\operatorname {t r} \\left(\\nabla s _ {\\theta} (\\psi (t _ {0}) x, t _ {0})\\right) + \\| s _ {\\theta} (\\psi (t _ {0}) x, t _ {0}) \\| ^ {2}\\right), \\tag {6}", + "image_path": "c23be7f55fb70fd22c54257e4c38d06448b412d352b511869c9203247937ce50.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "t_0 \\in [0,1]" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " is a hyperparameter. Kamkari et al. 
(2024b) proved that, when " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "t_0 \\approx 0" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "\\mathrm{FLIPD}(x,t_0)" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " is a valid approximation of " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": ". The reason for this is that the rate of change of the log density of the convolution between " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " and a Gaussian evaluated at " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " with respect to the amount of added Gaussian noise approximates " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x_0)" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": ", and Kamkari et al. (2024b) showed that FLIPD computes this rate of change. 
In practice, computing the trace of the Jacobian of " + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "inline_equation", + "content": "s_{\\theta}" + }, + { + "bbox": [ + 104, + 406, + 505, + 483 + ], + "type": "text", + "content": " is the only expensive operation needed to compute FLIPD, and this is easily approximated by using the Hutchinson stochastic trace estimator (Hutchinson, 1989)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": "NB Stanczuk et al. (2024) proposed another estimator of " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " for DMs. Following Kamkari et al. (2024b), we refer to this estimator as the normal bundle (NB) estimator. Stanczuk et al. (2024) proved that when " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "f(x_{t},t)\\equiv 0" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "s_\\theta (x_t,t)" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " points orthogonally towards " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "t\\to 0" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": ". 
They leverage this observation as follows: for a given " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": ", Equation 3 is started at " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " and run forward until time " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": "; this is done " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " times, resulting in " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "x_{t_0}^{(1)},\\ldots ,x_{t_0}^{(k)}" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": ". 
The matrix " + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0)\\in \\mathbb{R}^{d\\times k}" + }, + { + "bbox": [ + 104, + 489, + 506, + 548 + ], + "type": "text", + "content": " is then constructed as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 212, + 553, + 505, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 212, + 553, + 505, + 574 + ], + "spans": [ + { + "bbox": [ + 212, + 553, + 505, + 574 + ], + "type": "interline_equation", + "content": "S _ {\\theta} (x, t _ {0}) = \\left[ s _ {\\theta} \\left(x _ {t _ {0}} ^ {(1)}, t _ {0}\\right) \\mid \\dots \\mid s _ {\\theta} \\left(x _ {t _ {0}} ^ {(k)}, t _ {0}\\right) \\right], \\tag {7}", + "image_path": "09c1198eea92f72a1d14fc6dff097c9ca924cb063697bcb418e6c97d8a5538f7.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "content": "and thanks to the previous observation, the columns of " + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0)" + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "content": " approximately span the normal space of " + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "content": " when " + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "inline_equation", + "content": "t_0 \\approx 0" + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": 
"text", + "content": ", meaning that rank " + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0) \\approx d - \\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 579, + 504, + 612 + ], + "type": "text", + "content": ". The NB estimator is given by" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 239, + 612, + 504, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 612, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 239, + 612, + 504, + 624 + ], + "type": "interline_equation", + "content": "\\mathrm {N B} (x, t _ {0}) = d - \\operatorname {r a n k} S _ {\\theta} (x, t _ {0}). \\tag {8}", + "image_path": "c163da3fc444d8ae948c99107cbc6aaafbe24eaecbb93e9966cb58ac483d3975.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": "In practice the rank is numerically computed by setting a threshold, carrying out a singular value decomposition of " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0)" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": ", and counting the number of singular values above the threshold. Stanczuk et al. (2024) recommend setting " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "k = 4d" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": ", and we follow this recommendation. 
Computing the NB estimator is much more expensive than FLIPD, since " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "4d" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": " forward calls have to be made to construct " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0)" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": ", and then the singular value decomposition has a cost which is cubic in " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": ". Finally, we point out that when " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "f" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": " is not identically equal to 0, the NB method can be easily adapted to still provide a valid approximation of " + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 627, + 505, + 704 + ], + "type": "text", + "content": " (Kamkari et al., 2024a)." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "text", + "content": "We highlight that both FLIPD and NB were originally developed as estimators of " + }, + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x)" + }, + { + "bbox": [ + 104, + 709, + 505, + 732 + ], + "type": "text", + "content": " under the view that if the learned score function is a good approximation of the true score function, then" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) \\approx \\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ". 
In our work, we see these methods as approximating " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": ". Note that these views are not contradictory: when the DM properly approximates the true score function, it will indeed be the case that " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x) \\approx \\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "; importantly though, when this approximation fails, we interpret " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{FLIPD}(x, t_0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{NB}(x, t_0)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " as still providing a valid approximation of " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": " rather than a poor estimate of " + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}(x)" + }, + { + "bbox": [ + 104, + 82, + 504, + 140 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 151, + 317, + 162 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 151, + 317, + 162 + ], + "spans": [ + { + "bbox": [ + 105, + 151, + 317, + 162 + ], + "type": "text", + "content": "B.2 LOCAL PRINCIPAL COMPONENT ANALYSIS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": "Local PCA (Fukunaga & Olsen, 1971) offers a straightforward method for estimating the " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": " of a datapoint by using linear local approximations to the data manifold. Given " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": ", local PCA first identifies a set of nearby points in the dataset, representing a neighbourhood; this is typically done through a " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": "-nearest neighbours algorithm. 
Next, the algorithm performs a principal component analysis (PCA) on this neighbourhood to get " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": " principal components and " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": " explained variances for each component; the resulting principal components capture the directions of data variation, with the explained variance showing the amount of variation along each direction. Off-manifold directions are expected to have negligible explained variance. Hence, local PCA determines the number of components with non-zero (or non-negligible) explained variance as an estimate for " + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x)" + }, + { + "bbox": [ + 104, + 171, + 506, + 272 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 284, + 264, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 284, + 264, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 284, + 264, + 295 + ], + "type": "text", + "content": "B.3 LID ESTIMATION WITH GANS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": "We assume the GAN is given by a generator " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "G_{\\theta}:\\mathbb{R}^{d^{\\prime}}\\to \\mathbb{R}^{d}" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": " which transforms latent variables from a distribution in " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{d^{\\prime}}" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": " to the ambient space " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": ". For a generated sample " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "x = G_{\\theta}(z)" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": ", we estimate " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": " as the rank of the Jacobian of the generator, i.e. 
rank " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\nabla G_{\\theta}(z)" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": ". As for the NB estimator with DMs, the rank is numerically computed by thresholding singular values. We highlight that this is a standard approach to estimate " + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 304, + 506, + 373 + ], + "type": "text", + "content": " in decoder-based DGMs (Horvat & Pfister, 2022; Kamkari et al., 2024a; Humayun et al., 2024)." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 236, + 156 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 236, + 156 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 236, + 156 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 236, + 156 + ], + "type": "image", + "image_path": "5c0b16ed0567e2327f4b56dc3c4b49dac4a5b8be4abe751e7b601150c871632f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 159, + 220, + 169 + ], + "lines": [ + { + "bbox": [ + 121, + 159, + 220, + 169 + ], + "spans": [ + { + "bbox": [ + 121, + 159, + 220, + 
169 + ], + "type": "text", + "content": "(a) " + }, + { + "bbox": [ + 121, + 159, + 220, + 169 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 121, + 159, + 220, + 169 + ], + "type": "text", + "content": " adjusted by PNG for iDDPM." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 241, + 79, + 369, + 156 + ], + "blocks": [ + { + "bbox": [ + 241, + 79, + 369, + 156 + ], + "lines": [ + { + "bbox": [ + 241, + 79, + 369, + 156 + ], + "spans": [ + { + "bbox": [ + 241, + 79, + 369, + 156 + ], + "type": "image", + "image_path": "677b1fd825a7eab5e78551cc17fae5399b9a084a8507ede1f7c3073df1089e73.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 245, + 159, + 366, + 169 + ], + "lines": [ + { + "bbox": [ + 245, + 159, + 366, + 169 + ], + "spans": [ + { + "bbox": [ + 245, + 159, + 366, + 169 + ], + "type": "text", + "content": "(b) " + }, + { + "bbox": [ + 245, + 159, + 366, + 169 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 245, + 159, + 366, + 169 + ], + "type": "text", + "content": " adjusted by PNG for StyleGAN-ADA2." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 182, + 505, + 205 + ], + "lines": [ + { + "bbox": [ + 104, + 182, + 505, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 505, + 205 + ], + "type": "text", + "content": "Figure 8: Removing and analyzing image complexity as a confounding factor in memorization detection for CIFAR10 (a-b) and Stable Diffusion (c)." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 376, + 80, + 504, + 156 + ], + "blocks": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "lines": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "spans": [ + { + "bbox": [ + 376, + 80, + 504, + 156 + ], + "type": "image", + "image_path": "4696a08cdf73d0cfec31ddfd6888d146d110275477bf44c5c860d0b66d69e540.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 160, + 505, + 177 + ], + "lines": [ + { + "bbox": [ + 375, + 160, + 505, + 177 + ], + "spans": [ + { + "bbox": [ + 375, + 160, + 505, + 177 + ], + "type": "text", + "content": "(c) PNG compression length for Stable Diffusion images." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 222, + 257, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 222, + 257, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 222, + 257, + 234 + ], + "type": "text", + "content": "C EXPERIMENTAL DETAILS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 246, + 392, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 392, + 257 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 392, + 257 + ], + "type": "text", + "content": "C.1 HYPER-PARAMETER SETUP FOR LID ESTIMATION METHODS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{LID}}_*" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " with Local PCA As established in Appendix B.2, local PCA estimates the intrinsic dimensionality of a datapoint by counting the number of 
significant explained variances from PCA performed on the datapoint's local neighbourhood, determined by its " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " nearest neighbours (" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "k = 100" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " in our experiments). Finding the significant explained variances is done through a threshold hyperparameter, " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": ", where explained variances above " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " are considered significant. 
For our approach in Figure 5d, we introduce two key modifications to better adapt the original Local PCA algorithm for detecting DD-Mem: " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " instead of selecting " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " individually for each datapoint, we define it globally as the 10th percentile of all explained variances across the entire dataset; " + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 104, + 266, + 506, + 388 + ], + "type": "text", + "content": " if a datapoint has neighbours within the 10th percentile of all pairwise distances, we restrict the neighbourhood to those points. The second modification allows us to avoid including distant points in the neighbourhood if closer ones already exist and especially helps us detect zero-dimensional point masses." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "content": " for GANs As detailed in Appendix B.3, the rank of the Jacobian " + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\nabla G_{\\theta}(z)" + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "content": " can be used to estimate " + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "content": ". However, in practice, the rank — or, equivalently, the number of non-zero singular values — tends to equal the latent dimension; this is because singular values are typically close to zero but rarely exactly zero. To account for this, we apply a thresholding approach: a singular value is considered significant (non-zero) if it exceeds a hyperparameter " + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "content": ". We define " + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 401, + 504, + 472 + ], + "type": "text", + "content": " as the 10th percentile of all singular values computed from the generated images in Figure 5b and Figure 8b." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": " with NB We used " + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "inline_equation", + "content": "t_0 = 0.1" + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": " and thresholded the singular values of " + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "inline_equation", + "content": "S_{\\theta}(x,t_0)" + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": " by 10th percentile; the results are presented in Figure 5c and Figure 8a. The choice of " + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 104, + 483, + 504, + 529 + ], + "type": "text", + "content": " is empirically determined by observing how the NB score correlates with the memorization behavior with a fixed subset of 1000 randomly generated samples." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathbf{LID}}_{\\theta}" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " with FLIPD Unless stated otherwise, we set " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "t_0 = 0.05" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " for FLIPD and use the Hutchinson trace estimator to approximate the trace of the score gradient in Equation 6. In line with Kamkari et al. (2024b), we apply this in the latent space of Stable diffusion and use a single Hutchinson sample to estimate Equation 6 for all of our large-scale experiments." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": "CFG Norm for Detecting Memorized Samples in the Training Set Note that while Wen et al. (2023) use the generation process to measure whether a synthesized image has been memorized, we were interested in detecting whether real, training-set images have been memorized in Figure 6, which requires some methodological changes. 
To compute a memorization score, we take " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": " Euler steps forward using the conditional score " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "s_{\\theta}(x; t, c)" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": " with the probability flow ODE (Song et al., 2021) until time " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "t_0" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": " to get a point at " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": ". We then compute the CFG norm " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "\\| s_{\\theta}(x_0; t_0, c) - s_{\\theta}(x; t_0, \\emptyset) \\|" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": ". We use timestep " + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "inline_equation", + "content": "t_0 = 0.01" + }, + { + "bbox": [ + 104, + 598, + 506, + 676 + ], + "type": "text", + "content": " and 3 Euler steps." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 689, + 473, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 473, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 473, + 700 + ], + "type": "text", + "content": "C.2 THE CONFOUNDING EFFECT OF COMPLEXITY FOR DETECTING MEMORIZATION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 709, + 506, + 733 + ], + "type": "text", + "content": " is correlated with image complexity (Section 2; Kamkari et al. (2024b)), which raises a valid concern: the correlation, combined with the fact that simpler images are more likely to be memorized," + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "text", + "content": "suggests that image complexity may confound our analysis. 
This is evident in Figure 5a (right panel), where GAN-generated images with the lowest " + }, + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "text", + "content": " values are the simplest ones, not necessarily the memorized ones. To address this confounding factor, we draw inspiration from Kamkari et al. (2024b) and normalize it by PNG compression length, using it as a proxy for image complexity. We use the maximum compression level of 9 with the cv2 package (Bradski, 2000). According to this adjusted metric, the smallest values now correspond to memorized images that are not necessarily simple, such as the cars in CIFAR10. Figure 8a and Figure 8b show these adjusted LID estimated values, which achieve a slightly improved separation between memorized and not memorized images (as well as between exactly memorized and reconstructively memorized images) than the non-PNG-normalized results in the main text. It is worth noting that complexity did not appear to be a confounding factor in the Stable Diffusion analysis shown in Figure 6. In fact, as depicted in Figure 8c, the Tuxemon images are relatively simpler than the LAION memorized images, as measured by their PNG compression length. However, despite their simplicity, Tuxemon images have consistently higher " + }, + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "inline_equation", + "content": "\\widehat{\\mathrm{LID}}_{\\theta}" + }, + { + "bbox": [ + 107, + 82, + 504, + 240 + ], + "type": "text", + "content": " values compared to the memorized images in Figure 6b and Figure 6a." 
+ } + ] + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 292, + 37 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 468, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 468, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 468, + 94 + ], + "type": "text", + "content": "D TEXT CONDITIONING AND MEMORIZATION IN STABLE DIFFUSION" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 454, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 454, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 454, + 118 + ], + "type": "text", + "content": "D.1 ADAPTING DENOISING DIFFUSION PROBABILISTIC AND IMPLICIT MODELS" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": "Following Wen et al. (2023), we use denoising diffusion probabilistic models (DDPMs) (Ho et al., 2020). This model can be seen as a discretization of the forward SDE process of a score-based DM (Song et al., 2021). 
Here, instead of continuous timesteps " + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": ", a timestep " + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": " instead belongs to a sequence " + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "inline_equation", + "content": "\\{0,\\dots T\\}" + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": " being the largest timescale; we use " + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "inline_equation", + "content": "T = 50" + }, + { + "bbox": [ + 104, + 128, + 506, + 184 + ], + "type": "text", + "content": ". We use the colour red to denote the discretized notation used by Ho et al. (2020)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 189, + 504, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 189, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 189, + 504, + 212 + ], + "type": "text", + "content": "With that in mind, DDPMs can be seen as a Markov noising process with the following transition kernel, parameterized by " + }, + { + "bbox": [ + 104, + 189, + 504, + 212 + ], + "type": "inline_equation", + "content": "\\bar{\\alpha}_t" + }, + { + "bbox": [ + 104, + 189, + 504, + 212 + ], + "type": "text", + "content": ", mirroring the notation of Ho et al. 
(2020):" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 211, + 218, + 506, + 232 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 218, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 211, + 218, + 506, + 232 + ], + "type": "interline_equation", + "content": "p _ {t \\mid 0} \\left(x _ {t} \\mid x _ {0}\\right) := \\mathcal {N} \\left(x _ {t}; \\sqrt {\\bar {\\alpha} _ {t}} \\cdot x _ {0}, (1 - \\bar {\\alpha} _ {t}) \\mathbf {I} _ {d}\\right). \\tag {9}", + "image_path": "bb16b191ece8b326baa7eacc051123b25cd296936f7a1b0b1da777c20993015f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 245, + 506, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 245, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 506, + 268 + ], + "type": "text", + "content": "DDPMs do not directly parameterize the score function, but rather use a neural network " + }, + { + "bbox": [ + 104, + 245, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}(x_t,t)" + }, + { + "bbox": [ + 104, + 245, + 506, + 268 + ], + "type": "text", + "content": " which relates to the score function as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 275, + 505, + 289 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 275, + 505, + 289 + ], + "spans": [ + { + "bbox": [ + 127, + 275, + 505, + 289 + ], + "type": "interline_equation", + "content": "s _ {\\theta} (x, t / T) = - \\epsilon_ {\\theta} (x, t) / \\sqrt {1 - \\bar {\\alpha} _ {t}}, \\text {o r e q u i v a l e n t l y ,} - \\sigma (t / T) s _ {\\theta} (x, t / T) = \\epsilon_ {\\theta} (x, t). 
\\tag {10}", + "image_path": "ae6db9e01025cb98139c4224696353ec6a3d6deaf16ca02c05033e13f5ca5ee4.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "text", + "content": "Note that in this context, we have " + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\sigma^2 (t / T) = 1 - \\bar{\\alpha}_t" + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\psi (t / T) = \\sqrt{\\bar{\\alpha}_t}" + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "text", + "content": ". Equation 9 (the transition kernel) and Equation 10 (the score function) provide us with the recipe for estimating " + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 300, + 504, + 335 + ], + "type": "text", + "content": " using FLIPD with DDPMs (recall Equation 6)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "text", + "content": "When sampling from DMs we use the DDIM sampler (Song et al., 2020), mirroring the setup in Wen et al. (2023). 
In our notation, this sampler defines " + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\tilde{x}_t \\coloneqq x_t / \\psi(t)" + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "inline_equation", + "content": "x_t" + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "text", + "content": " is given as in Equation 3. In turn, " + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\tilde{x}_t" + }, + { + "bbox": [ + 104, + 340, + 506, + 373 + ], + "type": "text", + "content": " obeys the forward SDE:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 239, + 380, + 505, + 394 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 380, + 505, + 394 + ], + "spans": [ + { + "bbox": [ + 239, + 380, + 505, + 394 + ], + "type": "interline_equation", + "content": "\\mathrm {d} \\tilde {x} _ {t} = \\tilde {g} (t) \\mathrm {d} W _ {t}, \\quad \\tilde {x} _ {0} \\sim p _ {*} (x), \\tag {11}", + "image_path": "2e17bfa0502cf951c2069766cdd54aba6d49be434638ec85ecef39b4ea609f77.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 400, + 392, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 400, + 392, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 400, + 392, + 413 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 400, + 392, + 413 + ], + "type": "inline_equation", + "content": "\\tilde{g}(t) = g(t) / \\psi(t)" + }, + { + "bbox": [ + 104, + 400, + 392, + 413 + ], + "type": "text", + "content": ". 
This SDE has a corresponding score function" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 246, + 420, + 505, + 434 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 420, + 505, + 434 + ], + "spans": [ + { + "bbox": [ + 246, + 420, + 505, + 434 + ], + "type": "interline_equation", + "content": "\\tilde {s} _ {\\theta} (x, t) = \\psi (t) s _ {\\theta} (\\psi (t) x, t), \\tag {12}", + "image_path": "b135ba86c1957451dbab200b25bafee4eefc8a7486e30d5fdde1e1550fc2491a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "text", + "content": "and DDIM uses this score function to sample from the model. The transition kernel corresponding to Equation 11 has " + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\tilde{\\psi}(t) = 1" + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "text", + "content": " and a " + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\tilde{\\sigma}(t)" + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "text", + "content": " which can be computed in closed form. 
Analogously to Equation 10, we can define " + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\tilde{\\epsilon}_{\\theta}" + }, + { + "bbox": [ + 104, + 440, + 504, + 475 + ], + "type": "text", + "content": " as" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 239, + 482, + 505, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 482, + 505, + 496 + ], + "spans": [ + { + "bbox": [ + 239, + 482, + 505, + 496 + ], + "type": "interline_equation", + "content": "\\tilde {\\epsilon} _ {\\theta} (x, t) = - \\tilde {\\sigma} (t / T) \\tilde {s} _ {\\theta} (x, t / T). \\tag {13}", + "image_path": "9eee356fa83b584a85adb33c6323c117507ee4568a34991c753c9a4443db44cc.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 504, + 525 + ], + "type": "text", + "content": "We highlight that FLIPD (Equation 6) can be applied using the forward SDE in Equation 11 along with its corresponding score function in Equation 12, resulting in the estimate" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 186, + 532, + 505, + 552 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 532, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 186, + 532, + 505, + 552 + ], + "type": "interline_equation", + "content": "\\widetilde {\\operatorname {F L I P D}} (x, t) = d + \\tilde {\\sigma} ^ {2} (t) \\left(\\operatorname {t r} \\left(\\nabla \\tilde {s} _ {\\theta} (x, t)\\right) + \\| \\tilde {s} _ {\\theta} (x, t) \\| ^ {2}\\right). 
\\tag {14}", + "image_path": "1605efb7923653158bf5210893a2165212a0b909be20eac957cdff2fd3592a52.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": "Note that this estimate can be computed when having access to " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\tilde{\\epsilon}_{\\theta}" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": " thanks to Equation 13. We also note that in our text-conditioning analysis, we are interested in the probabilities conditioned by the text prompt, thus, these score functions are extended by the conditioning variable " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": ", resulting in the modified forms " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\epsilon_{\\theta}(x; t, c)" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\tilde{\\epsilon}_{\\theta}(x; t, c)" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "s_{\\theta}(x; t / T, c)" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "inline_equation", + "content": "\\tilde{s}_{\\theta}(x; t / T, c)" + }, + { + "bbox": [ + 104, + 558, + 505, + 605 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 617, + 479, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 617, + 479, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 617, + 479, + 628 + ], + "type": "text", + "content": "D.2 UNIFYING DIFFERENTIABLE METRICS FOR TEXT-CONDITIONED MEMORIZATION" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": "We begin by revisiting the differentiable memorization metric used by Wen et al. (2023) for detecting and mitigating memorization, reformulating it within the continuous, score-based framework of diffusion models. Building on this, we perform an analysis, making minimal modifications to the original formulation to derive alternative metrics that remain effective and are theoretically-grounded. As a result, here we will formally derive three differentiable metrics: " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\theta}^{s_{\\theta}^{\\mathrm{CFG}}}(c)" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": ", and finally " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": ". We show that the value Wen et al. 
(2023) compute in their paper is in fact an estimator of " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": ", rescaled by a constant. We then make minor modifications to introduce the two new metrics " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\theta}^{s_{\\theta}^{\\mathrm{CFG}}}(c)" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 639, + 506, + 733 + ], + "type": "text", + "content": " and interpret them through the lens of the MMH." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": "The Differentiable Metric of Wen et al. 
(2023) For any text condition " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": ", Wen et al. (2023) generate multiple samples " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "(\\tilde{x}_0^{(n)})_{n=1}^N" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": ", with the " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": "th sample following the (DDIM) trajectory " + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "inline_equation", + "content": "\\{\\tilde{x}_T^{(n)}, \\tilde{x}_{T-1}^{(n)}, \\ldots, \\tilde{x}_0^{(n)}\\}" + }, + { + "bbox": [ + 104, + 82, + 506, + 129 + ], + "type": "text", + "content": " from noise to data through the denoising process. They then introduce the following metric, which we have slightly reformulated to match our notation:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 159, + 131, + 505, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 131, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 159, + 131, + 505, + 163 + ], + "type": "interline_equation", + "content": "\\mathcal {A} ^ {\\mathrm {C F G}} (c; N, T) = \\frac {1}{T N} \\sum_ {n = 1} ^ {N} \\sum_ {t = 0} ^ {T} \\left\\| \\tilde {\\epsilon} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t, c\\right) - \\tilde {\\epsilon} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t, \\emptyset\\right) \\right\\| ^ {2}. 
\\tag {15}", + "image_path": "07b0ffd003b5acb52a63224493b62a9e7853a9e29c1caaeb363a7eb41b093188.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 170, + 504, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 192 + ], + "type": "text", + "content": "We colour-code the metric in red to distinguish between it and the analogous metric that we will shortly derive at the end of this section." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "inline_equation", + "content": "\\tilde{p}_t^{\\mathrm{CFG}}" + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "content": " represent the marginal probability at time " + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "content": " induced by the DDIM sampler conditioned on " + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "content": " with the addition of the CFG term. 
Recall from Equation 2 that the score used for sampling from " + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "inline_equation", + "content": "\\tilde{p}_t^{\\mathrm{CFG}}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 197, + 505, + 232 + ], + "type": "text", + "content": " with CFG is" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 190, + 233, + 505, + 247 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 233, + 505, + 247 + ], + "spans": [ + { + "bbox": [ + 190, + 233, + 505, + 247 + ], + "type": "interline_equation", + "content": "\\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} (x; t, c) = \\tilde {s} _ {\\theta} (x; t, \\emptyset) + \\lambda (\\tilde {s} _ {\\theta} (x; t, c) - \\tilde {s} _ {\\theta} (x; t, \\emptyset)). \\tag {16}", + "image_path": "b0da8ce813ee542b7cff3a16a00d95be623de22ba54be1fe8388fd6de46633a7.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 247, + 416, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 416, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 416, + 260 + ], + "type": "text", + "content": "Using Equation 13 and Equation 2, we can rewrite " + }, + { + "bbox": [ + 104, + 247, + 416, + 260 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c;N,T)" + }, + { + "bbox": [ + 104, + 247, + 416, + 260 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 262, + 505, + 294 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 262, + 505, + 294 + ], + "spans": [ + { + "bbox": [ + 116, + 262, + 505, + 294 + ], + "type": "interline_equation", + "content": "\\mathcal {A} ^ {\\mathrm {C F G}} (c; N, T) := \\frac {1}{T N} \\sum_ {n = 1} ^ {N} \\sum_ {t = 0} ^ {T} \\left\\| - \\frac {\\tilde {\\sigma} (t / T)}{\\lambda} \\left[ \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t} ^ {(n)}; t / T, 
c\\right) - \\tilde {s} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t / T, \\emptyset\\right) \\right] \\right\\| ^ {2}. \\tag {17}", + "image_path": "f0a2993a86dc8cf3aceef4ee65b2ca7f68ed1df34e0c43388883f90413598281.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 295, + 504, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 295, + 504, + 317 + ], + "spans": [ + { + "bbox": [ + 104, + 295, + 504, + 317 + ], + "type": "text", + "content": "We now assume " + }, + { + "bbox": [ + 104, + 295, + 504, + 317 + ], + "type": "inline_equation", + "content": "T \\to \\infty" + }, + { + "bbox": [ + 104, + 295, + 504, + 317 + ], + "type": "text", + "content": ", which will reformulate Equation 17 with an integral that we will replace with an expectation:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 123, + 318, + 505, + 441 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 318, + 505, + 441 + ], + "spans": [ + { + "bbox": [ + 123, + 318, + 505, + 441 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {A} ^ {\\mathrm {C F G}} (c; N) := \\lim _ {T \\rightarrow \\infty} \\mathcal {A} ^ {\\mathrm {C F G}} (c; N, T) (18) \\\\ = \\lambda^ {- 2} \\cdot \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\int_ {0} ^ {1} \\tilde {\\sigma} ^ {2} (t) \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t} ^ {(n)}; t, c\\right) - \\tilde {s} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t, \\emptyset\\right) \\| ^ {2} d t (19) \\\\ = \\lambda^ {- 2} \\cdot \\frac {1}{N} \\sum_ {n = 1} ^ {N} \\mathbb {E} _ {t \\sim \\mathcal {U} (0, 1)} \\left[ \\tilde {\\sigma} ^ {2} (t) \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t} ^ {(n)}; t, c\\right) - \\tilde {s} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t, \\emptyset\\right) \\| ^ {2} \\right] (20) \\\\ = \\lambda^ {- 2} \\cdot \\mathbb {E} _ {t \\sim \\mathcal {U} (0, 1)} \\left[ \\frac {1}{N} 
\\sum_ {n = 1} ^ {N} \\tilde {\\sigma} ^ {2} (t) \\cdot \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t} ^ {(n)}; t, c\\right) - \\tilde {s} _ {\\theta} \\left(\\tilde {x} _ {t} ^ {(n)}; t, \\emptyset\\right) \\| ^ {2} \\right]. (21) \\\\ \\end{array}", + "image_path": "23d7a58524ca0fe7731cb9cf27681e7b6799672224461a1f97db50a5fa471b3e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 443, + 506, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 443, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 443, + 506, + 475 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 443, + 506, + 475 + ], + "type": "inline_equation", + "content": "\\mathcal{U}(0,1)" + }, + { + "bbox": [ + 104, + 443, + 506, + 475 + ], + "type": "text", + "content": " denotes the uniform distribution. Next, we observe that the inner term of the expectation on the right-hand-side of Equation 21 is in fact a Monte-Carlo estimator. By the law of large numbers, we have the following:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 477, + 505, + 514 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 477, + 505, + 514 + ], + "spans": [ + { + "bbox": [ + 140, + 477, + 505, + 514 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {A} ^ {\\mathrm {C F G}} (c) := \\lim _ {N \\rightarrow \\infty} \\mathcal {A} ^ {\\mathrm {C F G}} (c; N) (22) \\\\ = \\lambda^ {- 2} \\cdot \\mathbb {E} _ {t \\sim \\mathcal {U} (0, 1)} \\mathbb {E} _ {\\tilde {x} _ {t} \\sim \\tilde {p} _ {t} ^ {\\mathrm {C F G}} (\\cdot | c)} \\left[ \\tilde {\\sigma} ^ {2} (t) \\cdot \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} (\\tilde {x} _ {t}; t, c) - \\tilde {s} _ {\\theta} (\\tilde {x} _ {t}; t, \\emptyset) \\| ^ {2} \\right]. 
(23) \\\\ \\end{array}", + "image_path": "478cdea4374fdc33c08e744206dc4583eeafbed888a52396abc244e2d4d8db6f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": "We now see that with the new formulation, all the red terms in Equation 23, have gone away, making it fully amenable to the score-based formulation of diffusion models. The " + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": " factor merely scales the metric, and for the purposes of detection and mitigation, this scaling is inconsequential: if a metric effectively predicts memorization, rescaling it will not diminish its effectiveness as a predictor. We thus disregard the scaling factor " + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": " to make the derivation cleaner and replace the uniform distribution " + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{U}(0,1)" + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": " with a general \"scheduling\" distribution " + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(0,1)" + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": " of timesteps in " + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "inline_equation", + "content": "(0,1]" + }, + { + "bbox": [ + 104, + 520, + 506, + 597 + ], + "type": "text", + "content": "; this would allow our metric to be a generalization of the one 
proposed by Wen et al. (2023):" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 150, + 598, + 505, + 615 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 598, + 505, + 615 + ], + "spans": [ + { + "bbox": [ + 150, + 598, + 505, + 615 + ], + "type": "interline_equation", + "content": "\\mathcal {A} ^ {\\mathrm {C F G}} (c) := \\mathbb {E} _ {t \\sim \\mathcal {T} (0, 1)} \\mathbb {E} _ {\\tilde {x} _ {t} \\sim \\tilde {p} _ {t} ^ {\\mathrm {C F G}} (\\cdot | c)} \\left[ \\tilde {\\sigma} ^ {2} (t) \\cdot \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} (\\tilde {x} _ {t}; t, c) - \\tilde {s} _ {\\theta} (\\tilde {x} _ {t}; t, \\emptyset) \\| ^ {2} \\right]. \\tag {24}", + "image_path": "fd38a7ecf98255de546574c840cae16b3bf1da10e28950356c352e31317d5d05.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "text", + "content": "Simplifying Further We have shown that the CFG vector norm and the CFG adjusted score norm behave similarly in Figure 3. 
If, instead of considering the CFG vector norm in Equation 24, we consider the CFG-adjusted score " + }, + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "inline_equation", + "content": "\\tilde{s}_{\\theta}^{\\mathrm{CFG}}(\\cdot ;t,c)" + }, + { + "bbox": [ + 104, + 620, + 504, + 654 + ], + "type": "text", + "content": ", we arrive at the following metric:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 177, + 656, + 505, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 656, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 177, + 656, + 505, + 673 + ], + "type": "interline_equation", + "content": "\\mathcal {A} ^ {s _ {\\theta} ^ {\\mathrm {C F G}}} (c) := \\mathbb {E} _ {t \\sim \\mathcal {T} (0, 1)} \\mathbb {E} _ {\\tilde {x} _ {t} \\sim \\tilde {p} _ {t} ^ {\\mathrm {C F G}} (\\cdot | c)} \\left[ \\tilde {\\sigma} ^ {2} (t) \\| \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} (\\tilde {x} _ {t}; t, c) \\| ^ {2} \\right]. \\tag {25}", + "image_path": "57484d7382d655b7f941bb465d106a53a7d354c43f887dcc88a014c63636b8cd.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": "We have shown this to be a viable memorization metric, able to detect tokens driving memorization in Figure 11, and behaving comparable to " + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": ", the original metric proposed by Wen et al. (2023). 
However, a nice property of " + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{s_{\\theta}^{\\mathrm{CFG}}}(c)" + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": " is that it can now be linked to MMH: for a memorized prompt where " + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": " is small, the score function " + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "inline_equation", + "content": "\\tilde{s}_{\\theta}^{\\mathrm{CFG}}(\\cdot ;t,c)" + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": ", especially for small " + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 674, + 506, + 733 + ], + "type": "text", + "content": ", tends to become large, causing the metric in Equation 25 to increase significantly." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": "Linking to FLIPD We now propose a more direct proxy for LID based on the FLIPD estimate of " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": ". 
Recalling Equation 6, we can define the class-conditional " + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 82, + 504, + 118 + ], + "type": "text", + "content": " estimate based on FLIPD as follows, analogously to Equation 14:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 154, + 123, + 505, + 146 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 123, + 505, + 146 + ], + "spans": [ + { + "bbox": [ + 154, + 123, + 505, + 146 + ], + "type": "interline_equation", + "content": "\\widetilde {\\text {F L I P D}} ^ {\\text {C F G}} (x; t, c) = d + \\tilde {\\sigma} ^ {2} (t) \\cdot \\left(\\operatorname {t r} \\left(\\nabla \\tilde {s} _ {\\theta} ^ {\\text {C F G}} (x; t, c)\\right) + \\| \\tilde {s} _ {\\theta} ^ {\\text {C F G}} (x; t, c) \\| ^ {2}\\right). \\tag {26}", + "image_path": "11da6900a803aca015af5d2b5ad16123aad2f3941f3231ed20c35abe65e6c602.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "text", + "content": "Noting that " + }, + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "inline_equation", + "content": "\\widetilde{\\text{FLIPD}}^{\\text{CFG}}" + }, + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "text", + "content": " has a similar term to Equation 25, we add " + }, + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 154, + 504, + 182 + ], + "type": "text", + "content": " and the trace term from Equation 26 into Equation 25, and propose the following MMH-based metric:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 137, + 189, + 505, + 206 + ], + "type": "interline_equation", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 137, + 189, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 137, + 189, + 505, + 206 + ], + "type": "interline_equation", + "content": "d + \\mathcal {A} ^ {s _ {\\theta} ^ {\\mathrm {C F G}}} (c) + \\mathbb {E} _ {t \\sim \\mathcal {T} (0, 1)} \\mathbb {E} _ {\\tilde {x} _ {t} \\sim \\tilde {p} _ {t} ^ {\\mathrm {C F G}} (\\cdot | c)} \\left[ \\tilde {\\sigma} ^ {2} (t) \\cdot \\operatorname {t r} \\left(\\nabla \\tilde {s} _ {\\theta} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t}; t, c\\right)\\right) \\right] = \\tag {27}", + "image_path": "af2a22c283bbb228faf74071231c67029c08556e8a74341413b04952de98197c.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 231, + 208, + 505, + 235 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 231, + 208, + 505, + 235 + ], + "spans": [ + { + "bbox": [ + 231, + 208, + 505, + 235 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {t \\sim \\mathcal {T} (0, 1)} \\mathbb {E} _ {\\tilde {x} _ {t} \\sim \\tilde {p} _ {t} ^ {\\mathrm {C F G}} (\\cdot | c)} \\left[ \\widetilde {\\text {F L I P D}} ^ {\\mathrm {C F G}} \\left(\\tilde {x} _ {t}; t, c\\right) \\right] =: \\mathcal {A} ^ {\\mathrm {F L I P D}} (c). 
\\tag {28}", + "image_path": "57a5661709573dbdc83a4c8457b48a053c3669cd2302e5703efdb4b3c50348ae.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "text", + "content": "Despite the fact that " + }, + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "text", + "content": " can be expressed in terms of " + }, + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{\\theta}^{s_{\\theta}^{\\mathrm{CFG}}}(c)" + }, + { + "bbox": [ + 104, + 243, + 506, + 268 + ], + "type": "text", + "content": ", the former indicates memorization when it is small, while the latter indicates memorization when it is large." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": "Note that while Equation 28 averages FLIPD values over (potentially) all the timesteps " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t \\in (0,1]" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": ", the theory linking FLIPD and " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " is only rigorously justified when " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t \\to 0" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + 
"content": " (Kamkari et al., 2024b). Hence, we set the scheduling distribution " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " such that it primarily samples " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " close to zero. As such, " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " will average FLIPD estimate terms that are closely linked to " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": ". Notably, our experiments also revealed that although setting " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " as small as possible makes sense from a mathematical perspective, the score function, and as a result, FLIPD estimates, become unstable as " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t \\to 0" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " (Pidstrigach, 2022; Kamkari et al., 2024b). 
Therefore, in practice, we choose " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " as a uniform supported on " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "(0.0,0.2]" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": "; therefore, putting more emphasis on these small " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": " values but at the same time avoiding instabilities in " + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 272, + 506, + 373 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": "The scheduling is a small, but important distinction between " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " on one hand, and " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{s_{\\theta}^{\\mathrm{CFG}}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " on the other hand; while " 
+ }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " sets " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " as a uniform on " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "(0.0, 0.2]" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{s_{\\theta}^{\\mathrm{CFG}}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " set " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": " to a uniform distribution on " + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "inline_equation", + "content": "(0, 1]" + }, + { + "bbox": [ + 104, + 377, + 504, + 416 + ], + "type": "text", + "content": ", to mirror the setup in Wen et al. (2023)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 429, + 383, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 429, + 383, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 429, + 383, + 442 + ], + "type": "text", + "content": "D.3 INCREASING IMAGE COMPLEXITY BY OPTIMIZING " + }, + { + "bbox": [ + 105, + 429, + 383, + 442 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": "Wen et al. (2023) have an experiment where they optimize the prompt (embedding) " + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": " directly to minimize " + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": ", and as a result decrease " + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}(c)" + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": ", with the purpose of obviating memorization. Here, we take a similar approach but instead optimize " + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": " to maximize " + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 452, + 506, + 486 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": "In Figure 9, we optimize " + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": " with Adam using multiple steps, and as we increase " + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "inline_equation", + "content": "A^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": ", we sample images using the prompt embedding which is being optimized. We see that images sampled from these prompts indeed increase in complexity. This is fully consistent with our expectations and understanding of LID. We see, however, that while at a certain range the images are relatively less memorized, the method tends to introduce excessively chaotic textures to artificially increase " + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": ", often at the expense of the image's semantic coherence. Despite this, we still find this to be an interesting result and invite future work on using different scheduling approaches for " + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "inline_equation", + "content": "A^{\\mathrm{FLIPD}}(c)" + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": " that can stabilize the optimization process of " + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 491, + 506, + 580 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 594, + 286, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 286, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 286, + 605 + ], + "type": "text", + "content": "D.4 TEXT PERTURBATION APPROACHES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "text", + "content": "GPT-based Perturbations As outlined in the main text, we sample " + }, + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "text", + "content": " tokens without replacement from a categorical distribution obtained by normalizing the token attributions, then use GPT-4 to replace these tokens; this ensures that tokens with the highest attributions are replaced more frequently. After selecting these " + }, + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 616, + 506, + 716 + ], + "type": "text", + "content": " tokens, we ask GPT to follow the instructions provided in Box 1 and use the output of the conversation as the new prompt. This process is repeated five times in our analysis to account for any randomness in the output of GPT. It is important to note that these perturbations are designed to preserve the semantic structure of the prompt. To ensure this, the instruction specifically asks GPT not to replace names of places or characters, and to keep the new prompt as semantically close to the original as possible." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 89, + 171, + 155 + ], + "blocks": [ + { + "bbox": [ + 106, + 89, + 171, + 155 + ], + "lines": [ + { + "bbox": [ + 106, + 89, + 171, + 155 + ], + "spans": [ + { + "bbox": [ + 106, + 89, + 171, + 155 + ], + "type": "image", + "image_path": "7433e209bc21454e8ad9dd9c97aa6becc2549f8bb58e191c8f6e4912bbe36eec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 146, + 159, + 463, + 167 + ], + "lines": [ + { + "bbox": [ + 146, + 159, + 463, + 167 + ], + "spans": [ + { + "bbox": [ + 146, + 159, + 463, + 167 + ], + "type": "text", + "content": "(a) Emma Watson Set to Star Alongside Tom Hanks in Film Adaptation of Dave Eggers' The Circle" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 173, + 89, + 236, + 155 + ], + "blocks": [ + { + "bbox": [ + 173, + 89, + 236, + 155 + ], + "lines": [ + { + "bbox": [ + 173, + 89, + 236, + 155 + ], + "spans": [ + { + "bbox": [ + 173, + 89, + 236, + 155 + ], + "type": "image", + "image_path": "b6bacfb6b213e7a1142241d16164ab200c28ba7b607e5b8d59d21e3d1a4d3be0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + 
"index": 2 + }, + { + "type": "image", + "bbox": [ + 239, + 89, + 304, + 155 + ], + "blocks": [ + { + "bbox": [ + 239, + 89, + 304, + 155 + ], + "lines": [ + { + "bbox": [ + 239, + 89, + 304, + 155 + ], + "spans": [ + { + "bbox": [ + 239, + 89, + 304, + 155 + ], + "type": "image", + "image_path": "59dbb4f95414e89d6643d9f149b122c8f26cebf60b5492a93e861e155e863330.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 89, + 370, + 155 + ], + "blocks": [ + { + "bbox": [ + 306, + 89, + 370, + 155 + ], + "lines": [ + { + "bbox": [ + 306, + 89, + 370, + 155 + ], + "spans": [ + { + "bbox": [ + 306, + 89, + 370, + 155 + ], + "type": "image", + "image_path": "18f528cd535eea771f4fbc07cb7994f09c43717c4517cb866c0e7609e7040623.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 373, + 89, + 438, + 155 + ], + "blocks": [ + { + "bbox": [ + 373, + 89, + 438, + 155 + ], + "lines": [ + { + "bbox": [ + 373, + 89, + 438, + 155 + ], + "spans": [ + { + "bbox": [ + 373, + 89, + 438, + 155 + ], + "type": "image", + "image_path": "9b972fe06bb6c5b1fb95b856cef7c6f5ad738017905efc28b383f2bd1c10b1c8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 440, + 89, + 504, + 155 + ], + "blocks": [ + { + "bbox": [ + 440, + 89, + 504, + 155 + ], + "lines": [ + { + "bbox": [ + 440, + 89, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 440, + 89, + 504, + 155 + ], + "type": "image", + "image_path": "be5099e8825a583427a62b69a7494fd94a143d259fe69c7ffb5c8af820b04bc1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 107, + 171, + 170, + 236 + ], + "blocks": [ + { + "bbox": [ + 107, + 171, + 170, + 236 + ], + "lines": [ + { + "bbox": [ + 107, + 171, + 170, + 236 + ], + 
"spans": [ + { + "bbox": [ + 107, + 171, + 170, + 236 + ], + "type": "image", + "image_path": "28982a0b0778f7a678b960f2cdcdf3b01d9759bf117ae42e35c1d0dc7f1efe6c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 171, + 171, + 236, + 236 + ], + "blocks": [ + { + "bbox": [ + 171, + 171, + 236, + 236 + ], + "lines": [ + { + "bbox": [ + 171, + 171, + 236, + 236 + ], + "spans": [ + { + "bbox": [ + 171, + 171, + 236, + 236 + ], + "type": "image", + "image_path": "1c9d4d1340d5351617a1bf42518b80ce82de51606e74be05ae51147ea81acfd4.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 190, + 242, + 419, + 250 + ], + "lines": [ + { + "bbox": [ + 190, + 242, + 419, + 250 + ], + "spans": [ + { + "bbox": [ + 190, + 242, + 419, + 250 + ], + "type": "text", + "content": "(b) Aero 31-984210BLK 31 Series 13x8 Wheel, Spun 4 on 4-1/4 BP 1 Inch BS" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 239, + 171, + 304, + 236 + ], + "blocks": [ + { + "bbox": [ + 239, + 171, + 304, + 236 + ], + "lines": [ + { + "bbox": [ + 239, + 171, + 304, + 236 + ], + "spans": [ + { + "bbox": [ + 239, + 171, + 304, + 236 + ], + "type": "image", + "image_path": "d1092e8a1364ae92e5b94540761ed08092cd79a03a32fdfbfd9bf6fc5c5eefbc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 306, + 171, + 370, + 237 + ], + "blocks": [ + { + "bbox": [ + 306, + 171, + 370, + 237 + ], + "lines": [ + { + "bbox": [ + 306, + 171, + 370, + 237 + ], + "spans": [ + { + "bbox": [ + 306, + 171, + 370, + 237 + ], + "type": "image", + "image_path": "1442ec45db8927de254f8d11040f16c4243e2450adcd499ae050380bf47bfa25.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + 
"bbox": [ + 373, + 171, + 438, + 237 + ], + "blocks": [ + { + "bbox": [ + 373, + 171, + 438, + 237 + ], + "lines": [ + { + "bbox": [ + 373, + 171, + 438, + 237 + ], + "spans": [ + { + "bbox": [ + 373, + 171, + 438, + 237 + ], + "type": "image", + "image_path": "59fec66cc34098ea25dacfdaeb336c38f48f1f217b389a71d4b63eb9c6a262ac.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 440, + 171, + 504, + 237 + ], + "blocks": [ + { + "bbox": [ + 440, + 171, + 504, + 237 + ], + "lines": [ + { + "bbox": [ + 440, + 171, + 504, + 237 + ], + "spans": [ + { + "bbox": [ + 440, + 171, + 504, + 237 + ], + "type": "image", + "image_path": "a450bdfb96f97d9222b24f7a63195e8690dda7b7c52e311738f83bc1b09c28d8.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 106, + 255, + 171, + 320 + ], + "blocks": [ + { + "bbox": [ + 106, + 255, + 171, + 320 + ], + "lines": [ + { + "bbox": [ + 106, + 255, + 171, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 255, + 171, + 320 + ], + "type": "image", + "image_path": "cd9ea13afd2d7ec656f54f557a47daef5ad993e7b8dc5584671364e446960aff.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 173, + 255, + 235, + 320 + ], + "blocks": [ + { + "bbox": [ + 173, + 255, + 235, + 320 + ], + "lines": [ + { + "bbox": [ + 173, + 255, + 235, + 320 + ], + "spans": [ + { + "bbox": [ + 173, + 255, + 235, + 320 + ], + "type": "image", + "image_path": "aef16aa1eb2dae6bbdd019174a433125e8259851eff6fb24f7cf7d079eb07557.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 238, + 255, + 304, + 320 + ], + "blocks": [ + { + "bbox": [ + 238, + 255, + 304, + 320 + ], + "lines": [ + { + "bbox": [ + 238, + 255, + 304, + 320 + ], + "spans": [ + { + 
"bbox": [ + 238, + 255, + 304, + 320 + ], + "type": "image", + "image_path": "4d722de73c19ac43529e8c09243bedf99dc71ca3b848b1614ba5f91c6f8985f2.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 261, + 325, + 349, + 332 + ], + "lines": [ + { + "bbox": [ + 261, + 325, + 349, + 332 + ], + "spans": [ + { + "bbox": [ + 261, + 325, + 349, + 332 + ], + "type": "text", + "content": "(c) Air Conditioners & Parts" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 306, + 255, + 370, + 320 + ], + "blocks": [ + { + "bbox": [ + 306, + 255, + 370, + 320 + ], + "lines": [ + { + "bbox": [ + 306, + 255, + 370, + 320 + ], + "spans": [ + { + "bbox": [ + 306, + 255, + 370, + 320 + ], + "type": "image", + "image_path": "c3be853675da42ecbeda104f10a1cc6c12ac1590c2afb7cef4bde682ff6db5e6.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 373, + 255, + 438, + 320 + ], + "blocks": [ + { + "bbox": [ + 373, + 255, + 438, + 320 + ], + "lines": [ + { + "bbox": [ + 373, + 255, + 438, + 320 + ], + "spans": [ + { + "bbox": [ + 373, + 255, + 438, + 320 + ], + "type": "image", + "image_path": "9b2ccf2e59f269af714ee1cee38fd189c1c13d1a4ccaf63ccede9ab8064699d3.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 440, + 255, + 504, + 320 + ], + "blocks": [ + { + "bbox": [ + 440, + 255, + 504, + 320 + ], + "lines": [ + { + "bbox": [ + 440, + 255, + 504, + 320 + ], + "spans": [ + { + "bbox": [ + 440, + 255, + 504, + 320 + ], + "type": "image", + "image_path": "f31a4db901809225a158ea2d9548fe30954ef6434b1e64be3d8087069112e1e1.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 106, + 337, + 171, + 403 + ], + "blocks": [ + 
{ + "bbox": [ + 106, + 337, + 171, + 403 + ], + "lines": [ + { + "bbox": [ + 106, + 337, + 171, + 403 + ], + "spans": [ + { + "bbox": [ + 106, + 337, + 171, + 403 + ], + "type": "image", + "image_path": "58d09cf05651d9b00a6030694ccffbd330e24ca541e9aa6ee83f5cb27368c6ed.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 173, + 337, + 237, + 403 + ], + "blocks": [ + { + "bbox": [ + 173, + 337, + 237, + 403 + ], + "lines": [ + { + "bbox": [ + 173, + 337, + 237, + 403 + ], + "spans": [ + { + "bbox": [ + 173, + 337, + 237, + 403 + ], + "type": "image", + "image_path": "f02bea39691a7ece7f9618b96fd2ff1f2978a6880c1c50711cda333b56bf4cb9.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 244, + 408, + 366, + 415 + ], + "lines": [ + { + "bbox": [ + 244, + 408, + 366, + 415 + ], + "spans": [ + { + "bbox": [ + 244, + 408, + 366, + 415 + ], + "type": "text", + "content": "(d)Netflix Hits 50 Million Subscribers" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 239, + 337, + 304, + 403 + ], + "blocks": [ + { + "bbox": [ + 239, + 337, + 304, + 403 + ], + "lines": [ + { + "bbox": [ + 239, + 337, + 304, + 403 + ], + "spans": [ + { + "bbox": [ + 239, + 337, + 304, + 403 + ], + "type": "image", + "image_path": "458f6fb019be4822d343a498cfdadfa4946bfcf2d4d4966a1aa99013c134d715.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 307, + 337, + 370, + 403 + ], + "blocks": [ + { + "bbox": [ + 307, + 337, + 370, + 403 + ], + "lines": [ + { + "bbox": [ + 307, + 337, + 370, + 403 + ], + "spans": [ + { + "bbox": [ + 307, + 337, + 370, + 403 + ], + "type": "image", + "image_path": "65c6a921bf7355a5c94b3155b3f708c4a684bdbfa8375f2041aaf9b8e0d6501b.jpg" + } + ] + } + ], + "index": 25, + "angle": 
0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 373, + 337, + 438, + 403 + ], + "blocks": [ + { + "bbox": [ + 373, + 337, + 438, + 403 + ], + "lines": [ + { + "bbox": [ + 373, + 337, + 438, + 403 + ], + "spans": [ + { + "bbox": [ + 373, + 337, + 438, + 403 + ], + "type": "image", + "image_path": "6b266563c4d5952037743d38bb2701a23f5c57f81e267171d83918cbe2bd01b9.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 440, + 337, + 504, + 403 + ], + "blocks": [ + { + "bbox": [ + 440, + 337, + 504, + 403 + ], + "lines": [ + { + "bbox": [ + 440, + 337, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 440, + 337, + 504, + 403 + ], + "type": "image", + "image_path": "c860270f16504537cc9dd4e85bb19df3b8a5b601f0d9fe055bacb60ceba79459.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 106, + 420, + 171, + 487 + ], + "blocks": [ + { + "bbox": [ + 106, + 420, + 171, + 487 + ], + "lines": [ + { + "bbox": [ + 106, + 420, + 171, + 487 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 171, + 487 + ], + "type": "image", + "image_path": "b695dad6044e511327e1b6932700901fb2f28c911772779f8dff801305a4b07c.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 173, + 420, + 237, + 486 + ], + "blocks": [ + { + "bbox": [ + 173, + 420, + 237, + 486 + ], + "lines": [ + { + "bbox": [ + 173, + 420, + 237, + 486 + ], + "spans": [ + { + "bbox": [ + 173, + 420, + 237, + 486 + ], + "type": "image", + "image_path": "188c44a19febfdd51e31aa3f8818a7be2603ee3eefc804b130ec0c6c1fb7cf92.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 491, + 363, + 499 + ], + "lines": [ + { + "bbox": [ + 247, + 491, + 363, + 499 + ], + "spans": [ + { + "bbox": [ + 247, + 491, 
+ 363, + 499 + ], + "type": "text", + "content": "(e) Waterford Sand Silk Stripe Swatch" + } + ] + } + ], + "index": 35, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 240, + 421, + 304, + 486 + ], + "blocks": [ + { + "bbox": [ + 240, + 421, + 304, + 486 + ], + "lines": [ + { + "bbox": [ + 240, + 421, + 304, + 486 + ], + "spans": [ + { + "bbox": [ + 240, + 421, + 304, + 486 + ], + "type": "image", + "image_path": "fd33eda282da7a8d0a5516a6ce631702be1f62679659f3eb6d420c2f4df684cb.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "type": "image", + "bbox": [ + 307, + 421, + 371, + 486 + ], + "blocks": [ + { + "bbox": [ + 307, + 421, + 371, + 486 + ], + "lines": [ + { + "bbox": [ + 307, + 421, + 371, + 486 + ], + "spans": [ + { + "bbox": [ + 307, + 421, + 371, + 486 + ], + "type": "image", + "image_path": "70ac16757d9dbe1478dc8c1d908411f00a4ec91d163c05302272b95f54d5ea7e.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_body" + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 374, + 421, + 438, + 486 + ], + "blocks": [ + { + "bbox": [ + 374, + 421, + 438, + 486 + ], + "lines": [ + { + "bbox": [ + 374, + 421, + 438, + 486 + ], + "spans": [ + { + "bbox": [ + 374, + 421, + 438, + 486 + ], + "type": "image", + "image_path": "b09101af71b64d8ad2c1242d529d64b820eb95d585f054140909bf49c540048c.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "type": "image", + "bbox": [ + 441, + 421, + 504, + 486 + ], + "blocks": [ + { + "bbox": [ + 441, + 421, + 504, + 486 + ], + "lines": [ + { + "bbox": [ + 441, + 421, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 441, + 421, + 504, + 486 + ], + "type": "image", + "image_path": "2e7926525f1ecdca4b0376c1f32ead45bf97ff5eacdd5101e1b9d4327ee7b920.jpg" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_body" + } + ], + "index": 34 
+ }, + { + "type": "image", + "bbox": [ + 107, + 503, + 171, + 570 + ], + "blocks": [ + { + "bbox": [ + 107, + 503, + 171, + 570 + ], + "lines": [ + { + "bbox": [ + 107, + 503, + 171, + 570 + ], + "spans": [ + { + "bbox": [ + 107, + 503, + 171, + 570 + ], + "type": "image", + "image_path": "73eafd47c5788b106f34424afd69f0f24ee6aa497112b21008d43d87cd295f29.jpg" + } + ] + } + ], + "index": 36, + "angle": 0, + "type": "image_body" + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 173, + 503, + 237, + 569 + ], + "blocks": [ + { + "bbox": [ + 173, + 503, + 237, + 569 + ], + "lines": [ + { + "bbox": [ + 173, + 503, + 237, + 569 + ], + "spans": [ + { + "bbox": [ + 173, + 503, + 237, + 569 + ], + "type": "image", + "image_path": "c7a8f1c970bc3e84100c68be1cf5474507c2d58226c2a3d8cd5b045ef8aa865d.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 202, + 574, + 408, + 582 + ], + "lines": [ + { + "bbox": [ + 202, + 574, + 408, + 582 + ], + "spans": [ + { + "bbox": [ + 202, + 574, + 408, + 582 + ], + "type": "text", + "content": "(f) Björk Explains Decision To Pull Vulnicura From Spotify" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_caption" + } + ], + "index": 37 + }, + { + "type": "image", + "bbox": [ + 241, + 503, + 305, + 569 + ], + "blocks": [ + { + "bbox": [ + 241, + 503, + 305, + 569 + ], + "lines": [ + { + "bbox": [ + 241, + 503, + 305, + 569 + ], + "spans": [ + { + "bbox": [ + 241, + 503, + 305, + 569 + ], + "type": "image", + "image_path": "ddc3088ac10e629610dd0c082b76968901c1b52d104e8c37fefbade2e65a3733.jpg" + } + ] + } + ], + "index": 38, + "angle": 0, + "type": "image_body" + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 307, + 503, + 372, + 569 + ], + "blocks": [ + { + "bbox": [ + 307, + 503, + 372, + 569 + ], + "lines": [ + { + "bbox": [ + 307, + 503, + 372, + 569 + ], + "spans": [ + { + "bbox": [ + 307, + 503, + 372, + 569 + ], + "type": "image", + "image_path": 
"f3c6500b7e7a3181a5357c681365421729b32ff4a3cd82aed7559a18094a605c.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 374, + 503, + 438, + 569 + ], + "blocks": [ + { + "bbox": [ + 374, + 503, + 438, + 569 + ], + "lines": [ + { + "bbox": [ + 374, + 503, + 438, + 569 + ], + "spans": [ + { + "bbox": [ + 374, + 503, + 438, + 569 + ], + "type": "image", + "image_path": "653b6d5a6eec440cb460461806bacec14ab4239d71f30dd01e933d24ef1563ce.jpg" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_body" + } + ], + "index": 40 + }, + { + "type": "image", + "bbox": [ + 441, + 503, + 504, + 569 + ], + "blocks": [ + { + "bbox": [ + 441, + 503, + 504, + 569 + ], + "lines": [ + { + "bbox": [ + 441, + 503, + 504, + 569 + ], + "spans": [ + { + "bbox": [ + 441, + 503, + 504, + 569 + ], + "type": "image", + "image_path": "819aa6cbcfc8e84fcc97f67bcc211f72cdabc9300e84024482f2b95678cba9a0.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 106, + 586, + 171, + 652 + ], + "blocks": [ + { + "bbox": [ + 106, + 586, + 171, + 652 + ], + "lines": [ + { + "bbox": [ + 106, + 586, + 171, + 652 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 171, + 652 + ], + "type": "image", + "image_path": "9442618e655e40e58d55b3178bd2feaa64841f4da06b75be5ec157e1e5bcf74e.jpg" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 184, + 657, + 425, + 665 + ], + "lines": [ + { + "bbox": [ + 184, + 657, + 425, + 665 + ], + "spans": [ + { + "bbox": [ + 184, + 657, + 425, + 665 + ], + "type": "text", + "content": "(g) Here's What You Need to Know About St. 
Vincent's Apple Music Radio Show" + } + ] + } + ], + "index": 49, + "angle": 0, + "type": "image_caption" + } + ], + "index": 43 + }, + { + "type": "image", + "bbox": [ + 173, + 586, + 238, + 652 + ], + "blocks": [ + { + "bbox": [ + 173, + 586, + 238, + 652 + ], + "lines": [ + { + "bbox": [ + 173, + 586, + 238, + 652 + ], + "spans": [ + { + "bbox": [ + 173, + 586, + 238, + 652 + ], + "type": "image", + "image_path": "1f774b4e91882fdb1bb0e4bf9caee6290f075e3e9a94f88876e63854593c2749.jpg" + } + ] + } + ], + "index": 44, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "lines": [ + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "type": "text", + "content": "Figure 9: Samples and their original memorized captions from directly optimizing text conditioning to increase " + }, + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "type": "text", + "content": " and reduce memorization. The images progress through different stages of the optimization process from left to right. While increasing " + }, + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}" + }, + { + "bbox": [ + 104, + 678, + 504, + 719 + ], + "type": "text", + "content": " helps reduce memorization, uncontrolled increases often introduce chaotic textures, resulting in unrealistic images." 
+ } + ] + } + ], + "index": 50, + "angle": 0, + "type": "image_caption" + } + ], + "index": 44 + }, + { + "type": "image", + "bbox": [ + 241, + 586, + 305, + 651 + ], + "blocks": [ + { + "bbox": [ + 241, + 586, + 305, + 651 + ], + "lines": [ + { + "bbox": [ + 241, + 586, + 305, + 651 + ], + "spans": [ + { + "bbox": [ + 241, + 586, + 305, + 651 + ], + "type": "image", + "image_path": "158c6d1e3ec039eed6d1b7d6ef4c1081af32d2646f7b44a1566ccac6b2ed14b6.jpg" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_body" + } + ], + "index": 45 + }, + { + "type": "image", + "bbox": [ + 307, + 586, + 372, + 651 + ], + "blocks": [ + { + "bbox": [ + 307, + 586, + 372, + 651 + ], + "lines": [ + { + "bbox": [ + 307, + 586, + 372, + 651 + ], + "spans": [ + { + "bbox": [ + 307, + 586, + 372, + 651 + ], + "type": "image", + "image_path": "07ad180a15279181ac53b5fd8557ad0ae004d015befbe03af5f012dc63f9f16f.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 374, + 586, + 438, + 651 + ], + "blocks": [ + { + "bbox": [ + 374, + 586, + 438, + 651 + ], + "lines": [ + { + "bbox": [ + 374, + 586, + 438, + 651 + ], + "spans": [ + { + "bbox": [ + 374, + 586, + 438, + 651 + ], + "type": "image", + "image_path": "734c00c7038d682db7ed4f5e85f0b114ca450ebc33e16e62a1ab9cfb8e1decce.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "type": "image", + "bbox": [ + 441, + 586, + 504, + 651 + ], + "blocks": [ + { + "bbox": [ + 441, + 586, + 504, + 651 + ], + "lines": [ + { + "bbox": [ + 441, + 586, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 441, + 586, + 504, + 651 + ], + "type": "image", + "image_path": "a8dadad6b16dda92817524a10677f67ae67a392d9184072eb3512f815d6e58a8.jpg" + } + ] + } + ], + "index": 48, + "angle": 0, + "type": "image_body" + } + ], + "index": 48 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": 
"header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 51 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 109, + 82, + 366, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 82, + 366, + 95 + ], + "spans": [ + { + "bbox": [ + 109, + 82, + 366, + 95 + ], + "type": "text", + "content": "Box 1: Instruction for perturbing a caption prompt using GPT-4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 109, + 102, + 323, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 102, + 323, + 127 + ], + "spans": [ + { + "bbox": [ + 109, + 102, + 323, + 127 + ], + "type": "text", + "content": "I have the following caption as a sequence of tokens: " + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 109, + 135, + 475, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 135, + 475, + 158 + ], + "spans": [ + { + "bbox": [ + 109, + 135, + 475, + 158 + ], + "type": "text", + "content": "I want to create a new caption based on this one, but I want to perturb the following tokens: " + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 109, + 167, + 313, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 109, + 167, + 313, + 178 + ], + "spans": [ + { + "bbox": [ + 109, + 167, + 313, + 178 + ], + "type": "text", + "content": "These are the rules to follow for perturbing tokens:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 182, + 501, + 
270 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 132, + 182, + 500, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 182, + 500, + 205 + ], + "spans": [ + { + "bbox": [ + 132, + 182, + 500, + 205 + ], + "type": "text", + "content": "1. If the token is a special character or punctuation without significant semantics, you can remove it or change it to any special character" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 208, + 471, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 208, + 471, + 220 + ], + "spans": [ + { + "bbox": [ + 132, + 208, + 471, + 220 + ], + "type": "text", + "content": "2. If the token is a number, you can replace it with another number that is close to it" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 222, + 501, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 222, + 501, + 245 + ], + "spans": [ + { + "bbox": [ + 132, + 222, + 501, + 245 + ], + "type": "text", + "content": "3. If the token is a special name, such as the name of someone or some place or some culture, it should not be replaced" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 248, + 500, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 248, + 500, + 270 + ], + "spans": [ + { + "bbox": [ + 132, + 248, + 500, + 270 + ], + "type": "text", + "content": "4. 
If the token is any other word, you can replace and rephrase it with any synonym that makes sense in the context" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 108, + 274, + 502, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 274, + 502, + 309 + ], + "spans": [ + { + "bbox": [ + 108, + 274, + 502, + 309 + ], + "type": "text", + "content": "Given these requirements, please provide me with a new caption, not as a sequence of tokens, but as a natural language sentence that semantically matches closely with the original caption except for the perturbed tokens. Do not say anything else in response, only provide the new caption." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "text", + "content": "Qualitative Comparison Figure 10 presents a qualitative comparison of our GPT-based perturbations applied to three memorized prompts. We have selected these specific examples to illustrate how the prompt perturbations function in practice. In this case, we set " + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "inline_equation", + "content": "k = 4" + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "text", + "content": " and randomly perturb the tokens based on attributions derived from " + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "inline_equation", + "content": "A^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "text", + "content": ". Additionally, Figure 10 includes a column demonstrating the random token addition (RTA) approach proposed by Somepalli et al. 
(2023a), where " + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 328, + 506, + 417 + ], + "type": "text", + "content": " random tokens from the CLIP library are inserted into the prompt. We see that the GPT-based perturbations better preserve the semantic integrity of the text caption, resulting in images that are not memorized and significantly more coherent." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 170, + 233, + 300 + ], + "blocks": [ + { + "bbox": [ + 130, + 151, + 205, + 161 + ], + "lines": [ + { + "bbox": [ + 130, + 151, + 205, + 161 + ], + "spans": [ + { + "bbox": [ + 130, + 151, + 205, + 161 + ], + "type": "text", + "content": "Memorized Prompt" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 170, + 233, + 300 + ], + "lines": [ + { + "bbox": [ + 105, + 170, + 233, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 233, + 300 + ], + "type": "image", + "image_path": "19f3de681abc50752cf79d3f4b3059b034dbbd820324f47089bef59321fd6013.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 300, + 230, + 318 + ], + "lines": [ + { + "bbox": [ + 105, + 
300, + 230, + 318 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 230, + 318 + ], + "type": "text", + "content": "Talks on the Precepts and Buddhist Ethics" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 239, + 170, + 369, + 300 + ], + "blocks": [ + { + "bbox": [ + 255, + 151, + 349, + 162 + ], + "lines": [ + { + "bbox": [ + 255, + 151, + 349, + 162 + ], + "spans": [ + { + "bbox": [ + 255, + 151, + 349, + 162 + ], + "type": "inline_equation", + "content": "\\mathbf{GPT} + \\mathcal{A}^{\\mathrm{FLIPD}}" + }, + { + "bbox": [ + 255, + 151, + 349, + 162 + ], + "type": "text", + "content": " Mitigation" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 239, + 170, + 369, + 300 + ], + "lines": [ + { + "bbox": [ + 239, + 170, + 369, + 300 + ], + "spans": [ + { + "bbox": [ + 239, + 170, + 369, + 300 + ], + "type": "image", + "image_path": "beda33e1363f86a92d529bee07f51e7c507b0c8a122f72dafc4e0927f1e58a4c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 300, + 365, + 319 + ], + "lines": [ + { + "bbox": [ + 239, + 300, + 365, + 319 + ], + "spans": [ + { + "bbox": [ + 239, + 300, + 365, + 319 + ], + "type": "text", + "content": "discussions about the precepts and Buddhist principles" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 105, + 328, + 234, + 456 + ], + "blocks": [ + { + "bbox": [ + 105, + 328, + 234, + 456 + ], + "lines": [ + { + "bbox": [ + 105, + 328, + 234, + 456 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 234, + 456 + ], + "type": "image", + "image_path": "aecfac9032b2c9dff38370900e6a67c97672bd7777291d0de9facf3bbefc3eaa.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 457, + 230, + 477 + ], + "lines": [ + { + "bbox": [ + 105, + 457, 
+ 230, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 230, + 477 + ], + "type": "text", + "content": "The Long Dark Gets First Trailer, Steam Early Access" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 240, + 328, + 369, + 456 + ], + "blocks": [ + { + "bbox": [ + 240, + 328, + 369, + 456 + ], + "lines": [ + { + "bbox": [ + 240, + 328, + 369, + 456 + ], + "spans": [ + { + "bbox": [ + 240, + 328, + 369, + 456 + ], + "type": "image", + "image_path": "3db6f34f485dcfb8c3972151cdc102e4b95c1bdf9427f8c0aa1acb5e4736967d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 457, + 365, + 477 + ], + "lines": [ + { + "bbox": [ + 239, + 457, + 365, + 477 + ], + "spans": [ + { + "bbox": [ + 239, + 457, + 365, + 477 + ], + "type": "text", + "content": "The Long Dark Gets First Trailer; Steam Early Access" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 105, + 485, + 234, + 614 + ], + "blocks": [ + { + "bbox": [ + 105, + 485, + 234, + 614 + ], + "lines": [ + { + "bbox": [ + 105, + 485, + 234, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 234, + 614 + ], + "type": "image", + "image_path": "e206096ab041da78e8c02515189a5f61072763f69083e6e9608049d6ee1b1cfc.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 615, + 223, + 624 + ], + "lines": [ + { + "bbox": [ + 105, + 615, + 223, + 624 + ], + "spans": [ + { + "bbox": [ + 105, + 615, + 223, + 624 + ], + "type": "text", + "content": "Sound Advice with John W Doyle" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 648, + 504, + 669 + ], + "lines": [ + { + "bbox": [ + 104, + 648, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 504, + 669 + ], + "type": "text", + "content": "Figure 
10: Comparison of mitigation approaches. The tokens highlighted in red indicate the changes and perturbations made by each approach." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 240, + 485, + 369, + 614 + ], + "blocks": [ + { + "bbox": [ + 240, + 485, + 369, + 614 + ], + "lines": [ + { + "bbox": [ + 240, + 485, + 369, + 614 + ], + "spans": [ + { + "bbox": [ + 240, + 485, + 369, + 614 + ], + "type": "image", + "image_path": "00bc14d79bfe184577e6b1bf6f308882ed08fef44360b8032c6227730241fb91.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 239, + 615, + 358, + 625 + ], + "lines": [ + { + "bbox": [ + 239, + 615, + 358, + 625 + ], + "spans": [ + { + "bbox": [ + 239, + 615, + 358, + 625 + ], + "type": "text", + "content": "sound guidance with john w doyle" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 375, + 170, + 504, + 300 + ], + "blocks": [ + { + "bbox": [ + 383, + 151, + 492, + 162 + ], + "lines": [ + { + "bbox": [ + 383, + 151, + 492, + 162 + ], + "spans": [ + { + "bbox": [ + 383, + 151, + 492, + 162 + ], + "type": "text", + "content": "RTA (Somepalli et al., 2023b)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 375, + 170, + 504, + 300 + ], + "lines": [ + { + "bbox": [ + 375, + 170, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 375, + 170, + 504, + 300 + ], + "type": "image", + "image_path": "4425afdc0dff077d747ecb8dcec93410cc302776858f5d6477f6fea4757785e8.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 300, + 500, + 327 + ], + "lines": [ + { + "bbox": [ + 375, + 300, + 500, + 327 + ], + "spans": [ + { + "bbox": [ + 375, + 300, + 500, + 327 + ], + "type": "text", + "content": "Talks mellon dragonball on the villar Precepts and 
reformed Buddhist Ethics" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 375, + 328, + 504, + 456 + ], + "blocks": [ + { + "bbox": [ + 375, + 328, + 504, + 456 + ], + "lines": [ + { + "bbox": [ + 375, + 328, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 375, + 328, + 504, + 456 + ], + "type": "image", + "image_path": "f010fe174b21c51390cb7667963bbfce64fa309b01a2194ae1a87dc224a73ef8.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 375, + 457, + 500, + 486 + ], + "lines": [ + { + "bbox": [ + 375, + 457, + 500, + 486 + ], + "spans": [ + { + "bbox": [ + 375, + 457, + 500, + 486 + ], + "type": "text", + "content": "barbershop relying The idal Long Dark Gets First Trailer, Steam Early ghorn Access" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 375, + 487, + 504, + 614 + ], + "blocks": [ + { + "bbox": [ + 375, + 487, + 504, + 614 + ], + "lines": [ + { + "bbox": [ + 375, + 487, + 504, + 614 + ], + "spans": [ + { + "bbox": [ + 375, + 487, + 504, + 614 + ], + "type": "image", + "image_path": "19282f1dd3d57346554569d47d0e09cb87607855214be9468de0c5200791c4fa.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 374, + 615, + 499, + 635 + ], + "lines": [ + { + "bbox": [ + 374, + 615, + 499, + 635 + ], + "spans": [ + { + "bbox": [ + 374, + 615, + 499, + 635 + ], + "type": "text", + "content": "Sound Advice with John payments grill hsfb acadi W Doyle" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": 
"Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 97, + 504, + 171 + ], + "blocks": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "lines": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 326, + 94 + ], + "type": "text", + "content": "D.5 MORE EXAMPLES OFTOKENATTRIBUTIONS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 97, + 504, + 171 + ], + "lines": [ + { + "bbox": [ + 108, + 97, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 108, + 97, + 504, + 171 + ], + "type": "image", + "image_path": "cc8002737295273ab5fc2e41ee95be487251855cc8caebcbee144ecf413a30b7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 115, + 173, + 211, + 269 + ], + "blocks": [ + { + "bbox": [ + 115, + 173, + 211, + 269 + ], + "lines": [ + { + "bbox": [ + 115, + 173, + 211, + 269 + ], + "spans": [ + { + "bbox": [ + 115, + 173, + 211, + 269 + ], + "type": "image", + "image_path": "b8b85ca477998dca945409aa0ff3fda0f027db61d1fdc5efe3a4b35ec09821d3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 272, + 185, + 283 + ], + "lines": [ + { + "bbox": [ + 131, + 272, + 185, + 283 + ], + "spans": [ + { + "bbox": [ + 131, + 272, + 185, + 283 + ], + "type": "text", + "content": "Training Image" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 212, + 
173, + 309, + 269 + ], + "blocks": [ + { + "bbox": [ + 212, + 173, + 309, + 269 + ], + "lines": [ + { + "bbox": [ + 212, + 173, + 309, + 269 + ], + "spans": [ + { + "bbox": [ + 212, + 173, + 309, + 269 + ], + "type": "image", + "image_path": "9f4f9a59ab39ceb878567d04fd4bdeedf6948307d3500c6e63ae7da322be335e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 272, + 289, + 283 + ], + "lines": [ + { + "bbox": [ + 230, + 272, + 289, + 283 + ], + "spans": [ + { + "bbox": [ + 230, + 272, + 289, + 283 + ], + "type": "text", + "content": "Generated Image" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 173, + 406, + 269 + ], + "blocks": [ + { + "bbox": [ + 310, + 173, + 406, + 269 + ], + "lines": [ + { + "bbox": [ + 310, + 173, + 406, + 269 + ], + "spans": [ + { + "bbox": [ + 310, + 173, + 406, + 269 + ], + "type": "image", + "image_path": "c5f96b6264cfe5a2971e2ea60d9ad8fee9b7058c10c51678cbb0d7111f7f7374.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 270, + 401, + 281 + ], + "lines": [ + { + "bbox": [ + 315, + 270, + 401, + 281 + ], + "spans": [ + { + "bbox": [ + 315, + 270, + 401, + 281 + ], + "type": "text", + "content": "GPT + ACFG Mitigation" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 408, + 173, + 505, + 269 + ], + "blocks": [ + { + "bbox": [ + 408, + 173, + 505, + 269 + ], + "lines": [ + { + "bbox": [ + 408, + 173, + 505, + 269 + ], + "spans": [ + { + "bbox": [ + 408, + 173, + 505, + 269 + ], + "type": "image", + "image_path": "5dbbe1d7d1975d551e8f2b4c803de8ea3628697114cdd471f6ca84078d25f90c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 270, + 499, + 282 + ], + "lines": [ + { + "bbox": [ + 413, + 270, + 499, + 282 + ], + 
"spans": [ + { + "bbox": [ + 413, + 270, + 499, + 282 + ], + "type": "text", + "content": "GPT + " + }, + { + "bbox": [ + 413, + 270, + 499, + 282 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\text{FLIPD}}" + }, + { + "bbox": [ + 413, + 270, + 499, + 282 + ], + "type": "text", + "content": " Mitigation" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 108, + 303, + 503, + 376 + ], + "blocks": [ + { + "bbox": [ + 194, + 291, + 416, + 300 + ], + "lines": [ + { + "bbox": [ + 194, + 291, + 416, + 300 + ], + "spans": [ + { + "bbox": [ + 194, + 291, + 416, + 300 + ], + "type": "text", + "content": "(a) All the methods detect \"Netflix\" a private trademark driving memorization." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 303, + 503, + 376 + ], + "lines": [ + { + "bbox": [ + 108, + 303, + 503, + 376 + ], + "spans": [ + { + "bbox": [ + 108, + 303, + 503, + 376 + ], + "type": "image", + "image_path": "9ee32f62a5f4041b21d9120a98d08ff6064a5810a5bc7bf961d14d7f4fa0bc90.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 123, + 379, + 213, + 459 + ], + "blocks": [ + { + "bbox": [ + 123, + 379, + 213, + 459 + ], + "lines": [ + { + "bbox": [ + 123, + 379, + 213, + 459 + ], + "spans": [ + { + "bbox": [ + 123, + 379, + 213, + 459 + ], + "type": "image", + "image_path": "a4efe1b32e017832af294044d5f76b9025609eb7e2e95ac2a33478e75ac7590b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 459, + 213, + 470 + ], + "lines": [ + { + "bbox": [ + 123, + 459, + 213, + 470 + ], + "spans": [ + { + "bbox": [ + 123, + 459, + 213, + 470 + ], + "type": "text", + "content": "INTERVIEW" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 137, + 473, + 187, + 
483 + ], + "lines": [ + { + "bbox": [ + 137, + 473, + 187, + 483 + ], + "spans": [ + { + "bbox": [ + 137, + 473, + 187, + 483 + ], + "type": "text", + "content": "Training Image" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 216, + 379, + 306, + 459 + ], + "blocks": [ + { + "bbox": [ + 216, + 379, + 306, + 459 + ], + "lines": [ + { + "bbox": [ + 216, + 379, + 306, + 459 + ], + "spans": [ + { + "bbox": [ + 216, + 379, + 306, + 459 + ], + "type": "image", + "image_path": "d507567457cc12f77b2fbbf7cce192339e9ac918a9b27517f67c6fda2f020e46.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 216, + 459, + 306, + 469 + ], + "lines": [ + { + "bbox": [ + 216, + 459, + 306, + 469 + ], + "spans": [ + { + "bbox": [ + 216, + 459, + 306, + 469 + ], + "type": "text", + "content": "INTeRiXeN" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 231, + 473, + 288, + 483 + ], + "lines": [ + { + "bbox": [ + 231, + 473, + 288, + 483 + ], + "spans": [ + { + "bbox": [ + 231, + 473, + 288, + 483 + ], + "type": "text", + "content": "Generated Image" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 309, + 379, + 400, + 470 + ], + "blocks": [ + { + "bbox": [ + 309, + 379, + 400, + 470 + ], + "lines": [ + { + "bbox": [ + 309, + 379, + 400, + 470 + ], + "spans": [ + { + "bbox": [ + 309, + 379, + 400, + 470 + ], + "type": "image", + "image_path": "c3304b14407b1e6d109f26cf9ea86b733af6923836ade9b8f1c1c65f23502712.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 472, + 395, + 483 + ], + "lines": [ + { + "bbox": [ + 313, + 472, + 395, + 483 + ], + "spans": [ + { + "bbox": [ + 313, + 472, + 395, + 483 + ], + "type": "text", + "content": "GPT + CFG Mitigation" + } + ] + } + ], + 
"index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 404, + 378, + 494, + 470 + ], + "blocks": [ + { + "bbox": [ + 404, + 378, + 494, + 470 + ], + "lines": [ + { + "bbox": [ + 404, + 378, + 494, + 470 + ], + "spans": [ + { + "bbox": [ + 404, + 378, + 494, + 470 + ], + "type": "image", + "image_path": "f3618e705a92a9270d87e22ebc93240f26af277d0d8523d23f237f62e7b33c40.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 406, + 472, + 488, + 483 + ], + "lines": [ + { + "bbox": [ + 406, + 472, + 488, + 483 + ], + "spans": [ + { + "bbox": [ + 406, + 472, + 488, + 483 + ], + "type": "text", + "content": "GPT + AFLIPD Mitigation" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 108, + 504, + 503, + 578 + ], + "blocks": [ + { + "bbox": [ + 114, + 492, + 495, + 502 + ], + "lines": [ + { + "bbox": [ + 114, + 492, + 495, + 502 + ], + "spans": [ + { + "bbox": [ + 114, + 492, + 495, + 502 + ], + "type": "text", + "content": "(b) All the methods detect \"interview\" (the movie title) as the driver for memorization, " + }, + { + "bbox": [ + 114, + 492, + 495, + 502 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\mathrm{CFG}}" + }, + { + "bbox": [ + 114, + 492, + 495, + 502 + ], + "type": "text", + "content": " also detects \"Sony\" as a significant token." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 504, + 503, + 578 + ], + "lines": [ + { + "bbox": [ + 108, + 504, + 503, + 578 + ], + "spans": [ + { + "bbox": [ + 108, + 504, + 503, + 578 + ], + "type": "image", + "image_path": "512485a5f49cf69032623f47abec189c6779f4cf21f6044337e6d766b550ab06.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 116, + 578, + 207, + 670 + ], + "blocks": [ + { + "bbox": [ + 116, + 578, + 207, + 670 + ], + "lines": [ + { + "bbox": [ + 116, + 578, + 207, + 670 + ], + "spans": [ + { + "bbox": [ + 116, + 578, + 207, + 670 + ], + "type": "image", + "image_path": "a4ffc0474080c238ac21de3db8296bc7a3dc1888d3efef3df4944006e18248ff.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 129, + 674, + 181, + 684 + ], + "lines": [ + { + "bbox": [ + 129, + 674, + 181, + 684 + ], + "spans": [ + { + "bbox": [ + 129, + 674, + 181, + 684 + ], + "type": "text", + "content": "Training Image" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 210, + 578, + 301, + 670 + ], + "blocks": [ + { + "bbox": [ + 210, + 578, + 301, + 670 + ], + "lines": [ + { + "bbox": [ + 210, + 578, + 301, + 670 + ], + "spans": [ + { + "bbox": [ + 210, + 578, + 301, + 670 + ], + "type": "image", + "image_path": "b2dd67adc3c88cc459cf858b7944074ade4fe8e4936325c9d67757903a571230.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 225, + 673, + 282, + 684 + ], + "lines": [ + { + "bbox": [ + 225, + 673, + 282, + 684 + ], + "spans": [ + { + "bbox": [ + 225, + 673, + 282, + 684 + ], + "type": "text", + "content": "Generated Image" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 304, + 578, + 
396, + 671 + ], + "blocks": [ + { + "bbox": [ + 304, + 578, + 396, + 671 + ], + "lines": [ + { + "bbox": [ + 304, + 578, + 396, + 671 + ], + "spans": [ + { + "bbox": [ + 304, + 578, + 396, + 671 + ], + "type": "image", + "image_path": "76f3a81374e9757358982544445be8da143877a4872f92f99b1b8f04a82b806a.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 673, + 391, + 683 + ], + "lines": [ + { + "bbox": [ + 309, + 673, + 391, + 683 + ], + "spans": [ + { + "bbox": [ + 309, + 673, + 391, + 683 + ], + "type": "text", + "content": "GPT + " + }, + { + "bbox": [ + 309, + 673, + 391, + 683 + ], + "type": "inline_equation", + "content": "\\mathcal{A}^{\\text{CFG}}" + }, + { + "bbox": [ + 309, + 673, + 391, + 683 + ], + "type": "text", + "content": " Mitigation" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 203, + 693, + 406, + 703 + ], + "lines": [ + { + "bbox": [ + 203, + 693, + 406, + 703 + ], + "spans": [ + { + "bbox": [ + 203, + 693, + 406, + 703 + ], + "type": "text", + "content": "(c) All the methods detect \"podcast\" as the token driving memorization." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "lines": [ + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "type": "text", + "content": "Figure 11: Memorized Stable Diffusion samples with a comparison of token attributions based on three different memorization metrics: the CFG vector norm proposed by Wen et al. 
(2023), the CFG-adjusted score " + }, + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "type": "inline_equation", + "content": "s_{\\theta}^{\\mathrm{CFG}}(x; t, c)" + }, + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "type": "text", + "content": " norm, and the FLIPD estimate for " + }, + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{\\theta}(\\cdot \\mid c)" + }, + { + "bbox": [ + 104, + 711, + 506, + 742 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "image_caption" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 399, + 578, + 492, + 670 + ], + "blocks": [ + { + "bbox": [ + 399, + 578, + 492, + 670 + ], + "lines": [ + { + "bbox": [ + 399, + 578, + 492, + 670 + ], + "spans": [ + { + "bbox": [ + 399, + 578, + 492, + 670 + ], + "type": "image", + "image_path": "23f7fd3ddbb8e180aab08fbaaf24c022351b1ed5ecb9a0064d60ff79e2ba5ef5.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 403, + 672, + 486, + 683 + ], + "lines": [ + { + "bbox": [ + 403, + 672, + 486, + 683 + ], + "spans": [ + { + "bbox": [ + 403, + 672, + 486, + 683 + ], + "type": "text", + "content": "GPT + AFLIPD Mitigation" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "image_caption" + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 35 + } + ], + 
"page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 169, + 94 + ], + "type": "text", + "content": "E PROOFS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 394, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 394, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 394, + 118 + ], + "type": "text", + "content": "We restate each theorem in full formality below along with their proofs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": "Throughout this section, we let " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "P_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "P_\\theta" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " be the probability measures of the ground truth data and model, respectively. 
We assume that the respective supports of " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "P_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "P_\\theta" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " are " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_\\theta \\subset \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": ", smooth Riemannian submanifolds of the Euclidean space " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " with metrics " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "g_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "g_\\theta" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " respectively. 
We denote the Riemannian measures on " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_\\theta" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mu_*" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "\\mu_\\theta" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": ", respectively, so that " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "p_*(x) = \\mathrm{d}P_* / \\mathrm{d}\\mu_*(x)" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "inline_equation", + "content": "p_\\theta(x) = \\mathrm{d}P_\\theta / \\mathrm{d}\\mu_\\theta(x)" + }, + { + "bbox": [ + 104, + 123, + 506, + 201 + ], + "type": "text", + "content": ". As mentioned in Section 2, we take a lax definition of manifold which allows them to vary in dimensionality in different components. A single manifold under our definition is equivalent to a disjoint union of manifolds under the more standard definition." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 214, + 206, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 214, + 206, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 214, + 206, + 224 + ], + "type": "text", + "content": "E.1 PROPOSITION 3.1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "text", + "content": "Lemma E.1. Assume that " + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "inline_equation", + "content": "p_{*}(x) > 0" + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 235, + 504, + 258 + ], + "type": "text", + "content": ". Then, the following are equivalent:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 129, + 266, + 223, + 298 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 129, + 266, + 223, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 266, + 223, + 280 + ], + "spans": [ + { + "bbox": [ + 129, + 266, + 223, + 280 + ], + "type": "text", + "content": "1. 
" + }, + { + "bbox": [ + 129, + 266, + 223, + 280 + ], + "type": "inline_equation", + "content": "P_{*}(\\{x_{0}\\}) > 0" + }, + { + "bbox": [ + 129, + 266, + 223, + 280 + ], + "type": "text", + "content": " , and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 129, + 285, + 204, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 285, + 204, + 298 + ], + "spans": [ + { + "bbox": [ + 129, + 285, + 204, + 298 + ], + "type": "text", + "content": "2. " + }, + { + "bbox": [ + 129, + 285, + 204, + 298 + ], + "type": "inline_equation", + "content": "LID_{*}(x_{0}) = 0" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 312, + 133, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 312, + 133, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 312, + 133, + 323 + ], + "type": "text", + "content": "Proof." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "text", + "content": "(1) " + }, + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "inline_equation", + "content": "\\Longrightarrow" + }, + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "text", + "content": " (2) Assume " + }, + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "inline_equation", + "content": "P_{*}(\\{x_{0}\\}) > 0" + }, + { + "bbox": [ + 141, + 336, + 294, + 350 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 244, + 355, + 505, + 383 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 355, + 505, + 383 + ], + "spans": [ + { + "bbox": [ + 244, + 355, + 505, + 383 + ], + "type": "interline_equation", + "content": "0 < P _ {*} \\left(\\left\\{x _ {0} \\right\\}\\right) = \\int_ {\\left\\{x _ {0} \\right\\}} p _ {*} (x) \\mathrm {d} \\mu_ {*} (x), \\tag {29}", + "image_path": "397cce4e502727d88ea6653a023afb859224999c14003aa02c0070027e608b30.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "content": "which necessitates " + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_{*}(\\{x_{0}\\}) > 0" + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "content": ". 
If we had " + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}(x_0) > 0" + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "content": " this would incur a contradiction: letting " + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "inline_equation", + "content": "(U,\\phi)" + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "content": " be a chart around " + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "text", + "content": ", then by the definition of " + }, + { + "bbox": [ + 140, + 388, + 506, + 411 + ], + "type": "inline_equation", + "content": "\\mu_{*}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 240, + 417, + 505, + 444 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 240, + 417, + 505, + 444 + ], + "spans": [ + { + "bbox": [ + 240, + 417, + 505, + 444 + ], + "type": "interline_equation", + "content": "0 < \\mu_ {*} (\\{x _ {0} \\}) = \\int_ {\\phi (\\{x _ {0} \\})} \\sqrt {\\det (g _ {*})} d \\lambda , \\tag {30}", + "image_path": "f71bf5bb0966bf1a799bfd910c99a17cec132ab671c692f98f9945d8c31847b2.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "spans": [ + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "text", + "content": " is the Lebesgue measure on " + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{\\mathrm{LID}_*(x_0)}" + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": 
"text", + "content": " or the counting measure if " + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) = 0" + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "text", + "content": ". Due to the singleton domain of integration, positivity of the integral in Equation 30 would be impossible unless " + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) = 0" + }, + { + "bbox": [ + 140, + 451, + 506, + 486 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "spans": [ + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": "(2) " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": " (1) Suppose " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}(x_0) = 0" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ". This implies that " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\{x_0\\}" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": " is an open set in the subspace topology of " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_*" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "x_0 \\in \\operatorname{supp} \\mu_*" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ", any open set containing " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": " must have positive measure under " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mu_*" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ", so that " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "\\mu_*(\\{x_0\\}) > 0" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ". Then, since " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) = p_*(x_0) \\mu_*(\\{x_0\\})" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "p_*(x_0) > 0" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": ", it follows that " + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) > 0" + }, + { + "bbox": [ + 140, + 492, + 504, + 538 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 494, + 552, + 504, + 561 + ], + "blocks": [ + { + "bbox": [ + 494, + 552, + 504, + 561 + ], + "lines": [ + { + "bbox": [ + 494, + 552, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 494, + 552, + 504, + 561 + ], + "type": "image", + "image_path": "2bae549e6b12e209c731bf0bf78bac76920b0236498d191169644daaa9739f79.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "content": "Proposition E.2 (Formal Restatement of Proposition 3.1). Assume that " + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "inline_equation", + "content": "p_{*}(x) > 0" + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "inline_equation", + "content": "x \\in \\mathcal{M}_{*}" + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "inline_equation", + "content": "\\{x_{i}\\}_{i=1}^{n}" + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "content": " be a training dataset drawn independently from " + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 574, + 505, + 598 + ], + "type": "text", + "content": ". 
Then:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 127, + 606, + 505, + 659 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "text", + "content": "1. If duplicates occur in " + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "inline_equation", + "content": "\\{x_{i}\\}_{i = 1}^{n}" + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "text", + "content": " with positive probability, then they will occur at a point " + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "inline_equation", + "content": "LID_{*}(x_{0}) = 0" + }, + { + "bbox": [ + 128, + 606, + 504, + 630 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "spans": [ + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "content": "2. 
If " + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "inline_equation", + "content": "LID_{*}(x_{0}) = 0" + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "inline_equation", + "content": "x_0\\in \\mathcal{M}_*" + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "content": ", then the probability of duplication in " + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i = 1}^n" + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "content": " will converge to 1 as " + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "inline_equation", + "content": "n\\to \\infty" + }, + { + "bbox": [ + 127, + 636, + 505, + 659 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 673, + 133, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 673, + 133, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 673, + 133, + 685 + ], + "type": "text", + "content": "Proof." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": "(1) Due to Lemma E.1, it suffices to show that any duplicates in " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "\\{x_i\\}_{i=1}^n" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": " must occur at a point " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) > 0" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": ". It is thus enough to show that if " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) = 0" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": " for every " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": ", then " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "P_*(x_1 = x_2) = 0" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": ". 
Assume that " + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) = 0" + }, + { + "bbox": [ + 140, + 698, + 506, + 734 + ], + "type": "text", + "content": " for every" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "content": ". 
"Since " + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "x_1" + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "x_2" + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "content": " are independent, " + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "P_*(x_1 = x_2) = P_* \\times P_*(D)" + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "D = \\{(x, x) \\in \\mathcal{M}_* \\times \\mathcal{M}_* \\mid x \\in \\mathcal{M}_*\\}" + }, + { + "bbox": [ + 140, + 82, + 504, + 106 + ], + "type": "text", + "content": ". We then have:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 182, + 111, + 505, + 166 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 111, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 182, + 111, + 505, + 166 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} P _ {*} \\times P _ {*} (D) = \\int_ {D} \\mathrm {d} P _ {*} \\times P _ {*} \\left(x _ {1}, x _ {2}\\right) = \\int_ {\\mathcal {M} _ {*}} \\int_ {\\left\\{x _ {2} \\right\\}} \\mathrm {d} P _ {*} \\left(x _ {1}\\right) \\mathrm {d} P _ {*} \\left(x _ {2}\\right) \\tag {31} \\\\ = \\int_ {\\mathcal {M} _ {*}} P _ {*} \\left(\\left\\{x _ {2} \\right\\}\\right) \\mathrm {d} P _ {*} \\left(x _ {2}\\right) = 0, \\tag {32} \\\\ \\end{array}", + "image_path": "fd41d75f528976ee14b1644c6cf4e5a20b216d11aab1b1defa128c637c6d388a.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 171, + 504, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 171, + 504, + 195 + ], + "spans": [ + { + "bbox": [ + 140, + 171, + 504, + 195 + ], + "type": 
"text", + "content": "where the second equality follows from Fubini's theorem (see e.g. Theorem 7.26 in Folland (2013)), and the last equality follows by assumption. This finishes this part of the proof." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": "(2) Suppose " + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) = 0" + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_*" + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": ". By Lemma E.1, we have " + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "inline_equation", + "content": "P_*(\\{x_0\\}) > 0" + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": ". 
In this case, " + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "inline_equation", + "content": "P_*(x_i = x_0) > 0" + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "inline_equation", + "content": "i \\in \\{1, \\dots, n\\}" + }, + { + "bbox": [ + 140, + 201, + 506, + 225 + ], + "type": "text", + "content": ", meaning that" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 230, + 505, + 300 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 230, + 505, + 300 + ], + "spans": [ + { + "bbox": [ + 140, + 230, + 505, + 300 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} P _ {*} \\left(x _ {i} = x _ {j} \\text { for some } 1 \\leq i < j \\leq n\\right) \\geq P _ {*} \\left(x _ {i} = x _ {j} = x _ {0} \\text { for some } 1 \\leq i < j \\leq n\\right) \\tag {33} \\\\ \\geq 1 - P _ {*} \\left(x _ {i} \\neq x _ {0} \\text { for all } i \\geq 2\\right) \\tag {34} \\\\ = 1 - P _ {*} \\left(x _ {2} \\neq x _ {0}\\right) \\dots P _ {*} \\left(x _ {n} \\neq x _ {0}\\right) \\tag {35} \\\\ = 1 - \\left(1 - P _ {*} \\left(\\left\\{x _ {0} \\right\\}\\right)\\right) ^ {n - 1} \\tag {36} \\\\ \\longrightarrow 1, \\tag {37} \\\\ \\end{array}", + "image_path": "ca87a1874ee8432c35abc9e0c19e7cbdb3dbfbc85f237091431623eb215528ba.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 306, + 385, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 306, + 385, + 319 + ], + "spans": [ + { + "bbox": [ + 140, + 306, + 385, + 319 + ], + "type": "text", + "content": "where the last line depicts the limiting behaviour as " + }, + { + "bbox": [ + 140, + 306, + 385, + 319 + ], + "type": "inline_equation", + "content": "n\\to \\infty" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 494, + 332, + 505, + 342 + ], + "blocks": [ + { + "bbox": [ + 494, + 332, + 505, + 342 + 
], + "lines": [ + { + "bbox": [ + 494, + 332, + 505, + 342 + ], + "spans": [ + { + "bbox": [ + 494, + 332, + 505, + 342 + ], + "type": "image", + "image_path": "8215dbc752de23689f9b56fb21430abfe391f9166d2c8009210f7b3660a0ffbd.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 357, + 208, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 208, + 369 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 208, + 369 + ], + "type": "text", + "content": "E.2 PROPOSITION 3.2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "spans": [ + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": "Here we presume the joint distribution of model samples and " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": "-dimensional conditioning inputs " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "(x,c)\\in \\mathbb{R}^{d + k}" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": " has support " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "S\\subset \\mathbb{R}^{d + k}" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": " such that " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "\\{x:(x,c)\\in S" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "c\\in \\mathbb{R}^k\\} = \\mathcal{M}_\\theta" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + 
"type": "text", + "content": ". We define the conditional support of " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": " given " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": " to be " + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "inline_equation", + "content": "S(c) = \\{x:(x,c)\\in S\\}" + }, + { + "bbox": [ + 104, + 377, + 504, + 412 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": "Proposition E.3 (Formal Restatement of Proposition 3.2). Let " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "c \\in \\mathbb{R}^k" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": ". 
Suppose that " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "S(c)" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " is also a submanifold of " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " and denote its LID at " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": " by " + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "inline_equation", + "content": "LID_{\\theta}(x_0 \\mid c)" + }, + { + "bbox": [ + 104, + 415, + 505, + 439 + ], + "type": "text", + "content": ". We then have" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 250, + 444, + 505, + 457 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 250, + 444, + 505, + 457 + ], + "spans": [ + { + "bbox": [ + 250, + 444, + 505, + 457 + ], + "type": "interline_equation", + "content": "L I D _ {\\theta} \\left(x _ {0} \\mid c\\right) \\leq L I D _ {\\theta} \\left(x _ {0}\\right). \\tag {38}", + "image_path": "c6a5b2890b7f6a1f7ca70fa240692c855d37bc38c05772f62a4073255109f15a.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": "Proof. 
If " + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "inline_equation", + "content": "S(c)" + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": " is a submanifold of " + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": ", then it is also a submanifold of " + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_{\\theta}" + }, + { + "bbox": [ + 104, + 469, + 504, + 492 + ], + "type": "text", + "content": ". The inequality follows directly." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 506, + 195, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 195, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 195, + 517 + ], + "type": "text", + "content": "E.3 THEOREM A.3" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 526, + 506, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 526, + 506, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 526, + 506, + 539 + ], + "type": "text", + "content": "Here we show that OD-mem implies data-copying under the definition of Bhattacharjee et al. (2023)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": "Lemma E.4. 
Suppose " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "(\\mathcal{M},g)" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": "-dimensional smooth Riemannian submanifold of Euclidean space " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " be the Riemannian measure of " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": ". 
If " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "B_r^d (x_0)" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": "-dimensional ball of radius " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " centred at " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": ", then there exist constants " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "C_1^{\\mathcal{M}} > 0" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "C_2^{\\mathcal{M}} > 0" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " not depending on " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": " such that for all small enough " + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 541, + 506, + 588 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 221, + 592, + 505, + 608 + ], + "type": 
"interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 592, + 505, + 608 + ], + "spans": [ + { + "bbox": [ + 221, + 592, + 505, + 608 + ], + "type": "interline_equation", + "content": "C _ {1} ^ {\\mathcal {M}} r ^ {d _ {0}} \\leq \\mu \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M}\\right) \\leq C _ {2} ^ {\\mathcal {M}} r ^ {d _ {0}}. \\tag {39}", + "image_path": "a04990c21751f462919d91208ab21af83a2451585fe70c4d32fdcadf619b9551.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": "Proof. Without loss of generality, by rotating and translating, we assume " + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "inline_equation", + "content": "x_0 = 0 \\in \\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": " and that the tangent plane of " + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^d" + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "inline_equation", + "content": "\\mathbb{R}^{d_0} \\times \\{0\\}^{d - d_0}" + }, + { + "bbox": [ + 104, + 621, + 504, + 644 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": "As " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": " is smooth, in a neighbourhood " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "U" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "x_0 = 0" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": " can be written of a graph of a function " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "u: U \\subset \\mathbb{R}^{d_0} \\to \\mathbb{R}^{d - d_0}" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": ", such that " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "u(0) = 0" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": " and all first derivatives vanish at 0. 
Then, for small enough " + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "inline_equation", + "content": "r > 0" + }, + { + "bbox": [ + 104, + 649, + 504, + 683 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 167, + 689, + 505, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 689, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 167, + 689, + 505, + 704 + ], + "type": "interline_equation", + "content": "B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} = \\left\\{\\left(z, u (z)\\right) \\in \\mathbb {R} ^ {d} \\mid z \\in \\mathbb {R} ^ {d _ {0}}, \\| z \\| ^ {2} + \\| u (z) \\| ^ {2} < r ^ {2} \\right\\}. \\tag {40}", + "image_path": "3b2fe372bc18b53cc17efd1ad0ed503f3ebf408b63a0952c8ff3f9bfd1f90b3f.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 709, + 122, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 709, + 122, + 719 + ], + "spans": [ + { + "bbox": [ + 105, + 709, + 122, + 719 + ], + "type": "text", + "content": "Let" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 211, + 719, + 505, + 734 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 719, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 211, + 719, + 505, + 734 + ], + "type": "interline_equation", + "content": "G (r) = \\{(z, u (z)) \\in \\mathbb {R} ^ {d} \\mid z \\in \\mathbb {R} ^ {d _ {0}}, \\| z \\| < r \\} \\tag {41}", + "image_path": "66a0b66c918a1376af05cd4a29d227dc3638223cbd55956223cd8529621fc6e7.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + 
"bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "text", + "content": "be the graph of " + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "text", + "content": " in the open " + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "text", + "content": "-ball " + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "inline_equation", + "content": "B_r^{d_0}(0)" + }, + { + "bbox": [ + 104, + 82, + 307, + 95 + ], + "type": "text", + "content": ", and" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 211, + 99, + 505, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 99, + 505, + 114 + ], + "spans": [ + { + "bbox": [ + 211, + 99, + 505, + 114 + ], + "type": "interline_equation", + "content": "\\overline {{G (r)}} = \\{(z, u (z)) \\in \\mathbb {R} ^ {d} \\mid z \\in \\mathbb {R} ^ {d _ {0}}, \\| z \\| \\leq r \\} \\tag {42}", + "image_path": "6a7f23e329226a45591a829fa862a47f1e145bd487360789328ee040ab5e41ca.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": "be the graph of " + }, + { 
+ "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": " in the closed " + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": "-ball " + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "B_r^{d_0}(0)" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": ". Note that both are defined when " + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": " is defined, i.e. small enough " + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": ", and are subset of " + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 120, + 506, + 144 + ], + "type": "text", + "content": ". Then it is clear that we have" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 258, + 148, + 505, + 163 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 148, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 258, + 148, + 505, + 163 + ], + "type": "interline_equation", + "content": "B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} \\subseteq G (r). 
\\tag {43}", + "image_path": "c8a722ed222b7e31b65452325436f6df10df8fb1d5e5468fbf2f8a546d7e18f6.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": "Now, consider the function " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "v(z) = \\frac{\\|u(z)\\|}{\\|z\\|}" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ". Note that " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "v(z)" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " is continuous everywhere in " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "B_r^{d_0}(0) \\setminus \\{0\\}" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ". Since " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " and its derivatives vanish at " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "z = 0" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ", from the definition, we have " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\lim_{z \\to 0} v(z) = 0" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " as well. 
Thus " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "v(z)" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " can be extended to a continuous function in " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "B_r^{d_0}(0)" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ". Fix " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "R > 0" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ", and let " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "K" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " be the maximum of " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\overline{B_R^{d_0}}" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ". 
Then, if " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "\\|z\\| < ar" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "inline_equation", + "content": "a = \\frac{1}{\\sqrt{1 + K^2}}" + }, + { + "bbox": [ + 104, + 167, + 506, + 224 + ], + "type": "text", + "content": ", we have" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 182, + 227, + 505, + 242 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 227, + 505, + 242 + ], + "spans": [ + { + "bbox": [ + 182, + 227, + 505, + 242 + ], + "type": "interline_equation", + "content": "\\left\\| z \\right\\| ^ {2} + \\left\\| u (z) \\right\\| ^ {2} = \\left\\| z \\right\\| ^ {2} \\left(1 + v (z) ^ {2}\\right) \\leq \\left\\| z \\right\\| ^ {2} \\left(1 + K ^ {2}\\right) < r ^ {2}. \\tag {44}", + "image_path": "ae1a67df2c80eda733185a94d31f13a8f7f9b1a317851502ab56734e972a4aaa.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "text", + "content": "This shows that " + }, + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "inline_equation", + "content": "G(ar) \\subseteq B_r^d(x_0) \\cap \\mathcal{M}" + }, + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "text", + "content": " for " + }, + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "inline_equation", + "content": "0 < r < R" + }, + { + "bbox": [ + 104, + 245, + 389, + 258 + ], + "type": "text", + "content": ". 
Thus we have" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 217, + 262, + 505, + 277 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 262, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 217, + 262, + 505, + 277 + ], + "type": "interline_equation", + "content": "\\mu (G (a r)) \\leq \\mu \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M}\\right) \\leq \\mu (G (r)). \\tag {45}", + "image_path": "892af603b6305412d9627a500c5acf47e3637ac7f299f384cdce16f1cb28afab.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "inline_equation", + "content": "K_{1}" + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "inline_equation", + "content": "K_{2}" + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "content": " be the minimum and maximum of " + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "inline_equation", + "content": "\\sqrt{\\operatorname*{det}g}" + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "content": " over " + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "inline_equation", + "content": "B_r^{d_0}(0)" + }, + { + "bbox": [ + 104, + 283, + 504, + 297 + ], + "type": "text", + "content": ", respectively. 
Then we have" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 180, + 300, + 505, + 327 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 300, + 505, + 327 + ], + "spans": [ + { + "bbox": [ + 180, + 300, + 505, + 327 + ], + "type": "interline_equation", + "content": "\\mu (G (r)) = \\int_ {B _ {r} ^ {d _ {0}} (0)} \\sqrt {\\det g} \\mathrm {d} z \\leq \\int_ {B _ {r} ^ {d _ {0}} (0)} K _ {2} \\mathrm {d} z = K _ {2} V _ {d _ {0}} r ^ {d _ {0}} \\tag {46}", + "image_path": "bf8cc9e3782678142c5fb5447db18529ad6c335f89eacd4238d2a94ee5b4b43f.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 331, + 124, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 331, + 124, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 331, + 124, + 341 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 179, + 339, + 505, + 365 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 339, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 179, + 339, + 505, + 365 + ], + "type": "interline_equation", + "content": "\\mu (G (r)) = \\int_ {B _ {r} ^ {d _ {0}} (0)} \\sqrt {\\det g} d z \\geq \\int_ {B _ {r} ^ {d _ {0}} (0)} K _ {1} d z = K _ {1} V _ {d _ {0}} r ^ {d _ {0}}, \\tag {47}", + "image_path": "a86055b7db9730dd2a39b1cb46e65cf629a8942d65f0922d103a54751c8c377d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": "inline_equation", + "content": "V_{d_0}" + }, + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": "text", + "content": " is the Euclidean volume of the unit " + }, + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": 
"inline_equation", + "content": "d_0" + }, + { + "bbox": [ + 104, + 367, + 479, + 380 + ], + "type": "text", + "content": "-ball. Combining the above results, we have" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 157, + 384, + 505, + 398 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 384, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 157, + 384, + 505, + 398 + ], + "type": "interline_equation", + "content": "K _ {1} V _ {d _ {0}} a ^ {d _ {0}} r ^ {d _ {0}} \\leq \\mu (G (a r)) \\leq \\mu \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M}\\right) \\leq \\mu (G (r)) \\leq K _ {2} V _ {d _ {0}} r ^ {d _ {0}}, \\tag {48}", + "image_path": "067010f4b042ef888e483f34c1128cb0e769c6540fe836ae6e980e0a80441ade.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "text", + "content": "which finishes the proof with " + }, + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "inline_equation", + "content": "C_1^{\\mathcal{M}} = K_1V_{d_0}a^{d_0}" + }, + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "inline_equation", + "content": "C_2^{\\mathcal{M}} = K_2V_{d_0}" + }, + { + "bbox": [ + 104, + 401, + 379, + 415 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": "Theorem E.5 (Formal Restatement of Theorem A.3). 
Assume that " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{*}(x)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " are continuous and that " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " is strictly positive. Let " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "x_0 \\in \\mathcal{M}_{\\theta} \\cap \\mathcal{M}_*" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " and let " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " be a model undergoing OD-mem at " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": ", i.e. " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "0 \\leq LID_{\\theta}(x_0) < LID_{*}(x_0)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": ". 
Then for any " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\lambda > 1" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "0 < \\gamma < 1" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": ", there exists a radius " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "r_0" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " such that any " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "x \\in B_{r_0}^d(x_0)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": "-copy of " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " according to Definition A.1, and if " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "\\{x_j\\}_{j=1}^m" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " is generated independently from " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": ", then the probability of " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": "-copying " + }, + { + "bbox": [ + 104, + 
423, + 504, + 491 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": " converges to 1 as " + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "inline_equation", + "content": "m \\to \\infty" + }, + { + "bbox": [ + 104, + 423, + 504, + 491 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 503, + 317, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 503, + 317, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 317, + 514 + ], + "type": "text", + "content": "Proof. For an arbitrary value of " + }, + { + "bbox": [ + 105, + 503, + 317, + 514 + ], + "type": "inline_equation", + "content": "r > 0" + }, + { + "bbox": [ + 105, + 503, + 317, + 514 + ], + "type": "text", + "content": ", we have that" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 194, + 519, + 505, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 519, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 194, + 519, + 505, + 540 + ], + "type": "interline_equation", + "content": "P _ {\\theta} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right)\\right) \\geq \\mu_ {\\theta} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {\\theta}\\right) \\inf _ {x \\in B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {\\theta}} p _ {\\theta} (x) \\tag {49}", + "image_path": "9ce361f67323ccdf09018fef6481ba561fa7ae6f8f9dd117518a3d3770ce198b.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 544, + 163, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 163, + 556 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 163, + 556 + ], + "type": "text", + "content": "and similarly," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 190, + 555, + 505, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 190, + 555, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 190, + 555, + 505, + 578 + ], + "type": "interline_equation", + "content": "P _ {*} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right)\\right) \\leq \\mu_ {*} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {*}\\right) \\sup _ {x \\in B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {*}} p _ {*} (x). \\tag {50}", + "image_path": "1884be8d39570997df621a4c972546452c0f2e48b0cef1ba41b7e9f41dc3f89f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 579, + 184, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 579, + 184, + 591 + ], + "spans": [ + { + "bbox": [ + 105, + 579, + 184, + 591 + ], + "type": "text", + "content": "Using Lemma E.4," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 173, + 596, + 505, + 655 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 596, + 505, + 655 + ], + "spans": [ + { + "bbox": [ + 173, + 596, + 505, + 655 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\frac {P _ {*} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right)\\right)}{P _ {\\theta} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right)\\right)} \\leq \\frac {\\mu_ {*} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {*}\\right)}{\\mu_ {\\theta} \\left(B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {\\theta}\\right)} \\cdot \\frac {\\sup _ {x \\in B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {*}} p _ {*} (x)}{\\inf _ {x \\in B _ {r} ^ {d} \\left(x _ {0}\\right) \\cap \\mathcal {M} _ {\\theta}} p _ {\\theta} (x)} (51) \\\\ \\leq \\frac {C _ {2} ^ {\\mathcal {M} _ {*}}}{C _ {1} ^ {\\mathcal {M} _ {\\theta}}} r ^ {\\operatorname {L I D} _ {*} (x _ {0}) - \\operatorname {L I D} _ {\\theta} (x _ {0})} \\frac {\\sup _ {x \\in B _ {r} ^ {d} (x _ {0}) \\cap \\mathcal {M} _ {*}} p _ {*} (x)}{\\inf _ {x \\in B _ {r} ^ {d} (x _ {0}) \\cap \\mathcal {M} _ 
{\\theta}} p _ {\\theta} (x)}. (52) \\\\ \\end{array}", + "image_path": "987ba5c509057caf21c3fe2d37b20d1053e1ee33111aeae989ff4913eca57e24.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "spans": [ + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": "Note that by continuity and positivity of " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "\\inf_{x \\in B_r^d(x_0) \\cap \\mathcal{M}_\\theta} p_{\\theta}(x)" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": " is bounded away from 0 as " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "r \\to 0" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": ", and by continuity of " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "p_*(x)" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "\\sup_{x \\in B_r^d(x_0) \\cap \\mathcal{M}_*} p_*(x)" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": " is bounded. 
In turn, since by assumption " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_*(x_0) > \\mathrm{LID}_\\theta(x_0)" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": ", Equation 52 converges to 0 as " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "r \\to 0" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": ". As a result, there exists some " + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "inline_equation", + "content": "r_0" + }, + { + "bbox": [ + 104, + 659, + 504, + 707 + ], + "type": "text", + "content": " sufficiently small enough for both" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 266, + 707, + 505, + 735 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 707, + 505, + 735 + ], + "spans": [ + { + "bbox": [ + 266, + 707, + 505, + 735 + ], + "type": "interline_equation", + "content": "\\frac {P _ {*} \\left(B _ {r _ {0}} ^ {d} \\left(x _ {0}\\right)\\right)}{P _ {\\theta} \\left(B _ {r _ {0}} ^ {d} \\left(x _ {0}\\right)\\right)} \\leq \\frac {1}{\\lambda} \\tag {53}", + "image_path": "7cf358582054acd9b42787a029d449b909da4a543e8c1023ca9ab45a1dbaf765.jpg" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 
612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 83, + 124, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 83, + 124, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 124, + 94 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 268, + 91, + 505, + 106 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 91, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 268, + 91, + 505, + 106 + ], + "type": "interline_equation", + "content": "P _ {*} \\left(B _ {r _ {0}} ^ {d} \\left(x _ {0}\\right)\\right) \\leq \\gamma \\tag {54}", + "image_path": "e22ef32544a20bd9153706253e6707103b7ef4bd40d5dde3e7e5345b928c55fc.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": "to be true (the latter arising from the fact that " + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "inline_equation", + "content": "P_{*}(B_{r}^{d}(x_{0})) \\to 0" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "inline_equation", + "content": "r \\to 0" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": ", which follows from " + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "inline_equation", + "content": "P_{*}" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": " being absolutely continuous with respect to " + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "inline_equation", + "content": "\\mu_{*}" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 110, + 504, + 
144 + ], + "type": "inline_equation", + "content": "\\mu_{*}" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": " not assigning positive measure to singletons because " + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "inline_equation", + "content": "\\mathrm{LID}_{*}(x_{0}) > 0" + }, + { + "bbox": [ + 104, + 110, + 504, + 144 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "spans": [ + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": "Thus, any " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "x \\in B_{r_0}^d(x_0)" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": " is a " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "(\\lambda, \\gamma)" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": "-copy of " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "x_0" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": ". 
Since " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "B_{r_0}^d(x_0) \\cap \\mathcal{M}_\\theta" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": " contains an open set in the subspace topology of " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_\\theta" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": ", it follows that " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "\\mu_\\theta(B_{r_0}^d(x_0) \\cap \\mathcal{M}_\\theta) > 0" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": ". Then, since " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "p_\\theta(x)" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": " is strictly positive, " + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "inline_equation", + "content": "P_\\theta(B_{r_0}^d(x_0)) > 0" + }, + { + "bbox": [ + 104, + 148, + 505, + 188 + ], + "type": "text", + "content": ", so that" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 194, + 505, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 194, + 505, + 251 + ], + "spans": [ + { + "bbox": [ + 127, + 194, + 505, + 251 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} P _ {\\theta} \\left(x _ {j} \\text { is a } (\\lambda , \\gamma) \\text {-copy of } x _ {0} \\text { for some } 1 \\leq j \\leq m\\right) (55) \\\\ = 1 - P _ {\\theta} \\left(x _ {j} \\text { is not a } (\\lambda , \\gamma) \\text {-copy of } x _ {0} \\text { for every } 1 \\leq j \\leq m\\right) (56) \\\\ = 1 - P _ {\\theta} \\left(x _ {1} \\text { is not a } (\\lambda , \\gamma) \\text {-copy of } x _ {0}\\right) \\dots P _ {\\theta} \\left(x _ {m} 
\\text { is not a } (\\lambda , \\gamma) \\text {-copy of } x _ {0}\\right) (57) \\\\ \\geq 1 - \\left(1 - P _ {\\theta} \\left(B _ {r _ {0}} ^ {d} \\left(x _ {0}\\right)\\right)\\right) ^ {m} (58) \\\\ \\end{array}", + "image_path": "0d9e569f5cd0ef477237680b1602dd6289a479f9c1e4ea02dfd4e82d7b744805.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 257, + 217, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 257, + 217, + 268 + ], + "spans": [ + { + "bbox": [ + 105, + 257, + 217, + 268 + ], + "type": "text", + "content": "converges to 1 as " + }, + { + "bbox": [ + 105, + 257, + 217, + 268 + ], + "type": "inline_equation", + "content": "m\\to \\infty" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 326, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 326, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 326, + 94 + ], + "type": "text", + "content": "F MEMORIZED IMAGES FROM CIFAR-10" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "text", + "content": "We generate 50,000 
images from each model. Since StyleGAN2-ADA is class-conditioned, we generate an equal number of samples per class. We then retrieve nearest neighbors from the training dataset using both " + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "inline_equation", + "content": "(i)" + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "text", + "content": " SSCD distance (Pizzi et al., 2022) and " + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "inline_equation", + "content": "(ii)" + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "text", + "content": " calibrated " + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 106, + 506, + 227 + ], + "type": "text", + "content": " distance (Carlini et al., 2023). We find that each metric produces very different results, so we combine results from both to maximize our probability of identifying as many memorized images as possible. Furthermore, both metrics produce many false negatives, so we follow a manual process to produce accurate labels. For StyleGAN2-ADA, we take the closest 250 neighbours according to each distance, and for iDDPM, we take the top 300, producing a set of just under 500 or 600 images to visually examine for each model. We then label all of these instances as either not memorized, exactly memorized, or reconstructively memorized (Somepalli et al., 2023a). All other images are not labelled, and have a low chance of being memorized." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "type": "text", + "content": "Here we show each generated image we identified (odd rows) along with its matched training image (even rows below) for iDDPM and StyleGAN2-ADA on CIFAR10. 
For StyleGAN2-ADA, we labelled no pairs as reconstructive under the calibrated " + }, + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 104, + 232, + 504, + 266 + ], + "type": "text", + "content": " distance." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 276, + 265, + 323 + ], + "blocks": [ + { + "bbox": [ + 106, + 276, + 265, + 323 + ], + "lines": [ + { + "bbox": [ + 106, + 276, + 265, + 323 + ], + "spans": [ + { + "bbox": [ + 106, + 276, + 265, + 323 + ], + "type": "image", + "image_path": "21663f9df8855cc937c5bbf34d658112dbd67c0d9e4e8c56798bf8d801904a51.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 108, + 331, + 500, + 344 + ], + "lines": [ + { + "bbox": [ + 108, + 331, + 500, + 344 + ], + "spans": [ + { + "bbox": [ + 108, + 331, + 500, + 344 + ], + "type": "text", + "content": "Figure 12: iDDPM: human-labelled reconstructive pairs in the top 300 according to calibrated " + }, + { + "bbox": [ + 108, + 331, + 500, + 344 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 108, + 331, + 500, + 344 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 359, + 444, + 407 + ], + "blocks": [ + { + "bbox": [ + 106, + 359, + 444, + 407 + ], + "lines": [ + { + "bbox": [ + 106, + 359, + 444, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 359, + 444, + 407 + ], + "type": "image", + "image_path": "9c04cd7bec2c5b861a984e643eea45b2ee084488577e265603965fb0c0b30a43.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 415, + 483, + 427 + ], + "lines": [ + { + "bbox": [ + 126, + 415, + 483, + 427 + ], + "spans": [ + { + "bbox": [ + 126, + 415, + 483, + 427 + ], + "type": "text", + "content": "Figure 13: iDDPM: human-labelled exact pairs in the top 300 according to calibrated " + }, + { + "bbox": [ + 126, + 415, + 483, + 427 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 126, + 415, + 483, + 427 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 106, + 443, + 557, + 580 + ], + "blocks": [ + { + "bbox": [ + 106, + 443, + 557, + 580 + ], + "lines": [ + { + "bbox": [ + 106, + 443, + 557, + 580 + ], + "spans": [ + { + "bbox": [ + 106, + 443, + 557, + 580 + ], + "type": "image", + "image_path": "bda12a4ae2f4d928509c2db115df9b8664d6bdf4a79ba7ebcc8ace6c7e2db1c3.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 121, + 587, + 487, + 601 + ], + "lines": [ + { + "bbox": [ + 121, + 587, + 487, + 601 + ], + "spans": [ + { + "bbox": [ + 121, + 587, + 487, + 601 + ], + "type": "text", + "content": "Figure 14: iDDPM: human-labelled reconstructive pairs in the top 300 according to SSCD." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 504, + 731 + ], + "type": "text", + "content": "5The cutoffs of 250 and 300 were chosen by inspection; from these ranks onwards, images ceased to appear. \nmemorized." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 556, + 441 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 556, + 441 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 556, + 441 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 556, + 441 + ], + "type": "image", + "image_path": "030a3baa2848c07dc3d071b7917d5a4d2086eff1e4cc89a8b4c58a7813581be7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 138, + 448, + 471, + 460 + ], + "lines": [ + { + "bbox": [ + 138, + 448, + 471, + 460 + ], + "spans": [ + { + "bbox": [ + 138, + 448, + 471, + 460 + ], + "type": "text", + "content": "Figure 15: iDDPM: human-labelled exact pairs in the top 300 according to SSCD." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 479, + 242, + 525 + ], + "blocks": [ + { + "bbox": [ + 106, + 479, + 242, + 525 + ], + "lines": [ + { + "bbox": [ + 106, + 479, + 242, + 525 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 242, + 525 + ], + "type": "image", + "image_path": "6a42936f1c1d5609225d462cf8155f373e3cb06fa91b9f9104aef7bf87cb21b8.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 533, + 502, + 546 + ], + "lines": [ + { + "bbox": [ + 106, + 533, + 502, + 546 + ], + "spans": [ + { + "bbox": [ + 106, + 533, + 502, + 546 + ], + "type": "text", + "content": "Figure 16: StyleGAN2-ADA: human-labelled exact pairs in the top 250 according to calibrated " + }, + { + "bbox": [ + 106, + 533, + 502, + 546 + ], + "type": "inline_equation", + "content": "\\ell_2" + }, + { + "bbox": [ + 106, + 533, + 502, + 546 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 562, + 556, + 654 + ], + "blocks": [ + { + "bbox": [ + 106, + 562, + 556, + 654 + ], + "lines": [ + { + "bbox": [ + 106, + 562, + 556, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 562, + 556, + 654 + ], + "type": "image", + "image_path": "4116a1fa351b83d3359d8c4d22fb6122f7c45b67a8b05ec2cc796028a7aaf7ae.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 662, + 504, + 674 + ], + "lines": [ + { + "bbox": [ + 105, + 662, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 105, + 662, + 504, + 674 + ], + "type": "text", + "content": "Figure 17: StyleGAN2-ADA: human-labelled reconstructive pairs in the top 250 according to SSCD." 
+ } ] } ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 236, + 557, + 553 + ], + "blocks": [ + { + "bbox": [ + 106, + 236, + 557, + 553 + ], + "lines": [ + { + "bbox": [ + 106, + 236, + 557, + 553 + ], + "spans": [ + { + "bbox": [ + 106, + 236, + 557, + 553 + ], + "type": "image", + "image_path": "31918c9c708703f98e2708581f1202b737c65cd5abbb1d837fb3853f7b61f92d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 560, + 492, + 574 + ], + "lines": [ + { + "bbox": [ + 119, + 560, + 492, + 574 + ], + "spans": [ + { + "bbox": [ + 119, + 560, + 492, + 574 + ], + "type": "text", + "content": "Figure 18: StyleGAN2-ADA: human-labelled exact pairs in the top 250 according to SSCD." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file