SlowGuess committed
Commit 694a82b · verified · 1 Parent(s): 7ce02ed

Add Batch 2243897a-9167-4b3f-8de3-4f908914984f

Files changed (50)
  1. 2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_content_list.json +3 -0
  2. 2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_model.json +3 -0
  3. 2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_origin.pdf +3 -0
  4. 2dshapleyaframeworkforfragmenteddatavaluation/full.md +851 -0
  5. 2dshapleyaframeworkforfragmenteddatavaluation/images.zip +3 -0
  6. 2dshapleyaframeworkforfragmenteddatavaluation/layout.json +3 -0
  7. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_content_list.json +3 -0
  8. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_model.json +3 -0
  9. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_origin.pdf +3 -0
  10. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/full.md +826 -0
  11. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/images.zip +3 -0
  12. acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/layout.json +3 -0
  13. acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_content_list.json +3 -0
  14. acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_model.json +3 -0
  15. acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_origin.pdf +3 -0
  16. acloserlookatfewshotclassificationagain/full.md +0 -0
  17. acloserlookatfewshotclassificationagain/images.zip +3 -0
  18. acloserlookatfewshotclassificationagain/layout.json +3 -0
  19. acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_content_list.json +3 -0
  20. acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_model.json +3 -0
  21. acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_origin.pdf +3 -0
  22. acloserlookatselfsupervisedlightweightvisiontransformers/full.md +495 -0
  23. acloserlookatselfsupervisedlightweightvisiontransformers/images.zip +3 -0
  24. acloserlookatselfsupervisedlightweightvisiontransformers/layout.json +3 -0
  25. acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_content_list.json +3 -0
  26. acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_model.json +3 -0
  27. acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_origin.pdf +3 -0
  28. acloserlookattheinterventionprocedureofconceptbottleneckmodels/full.md +730 -0
  29. acloserlookattheinterventionprocedureofconceptbottleneckmodels/images.zip +3 -0
  30. acloserlookattheinterventionprocedureofconceptbottleneckmodels/layout.json +3 -0
  31. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_content_list.json +3 -0
  32. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_model.json +3 -0
  33. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_origin.pdf +3 -0
  34. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/full.md +0 -0
  35. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/images.zip +3 -0
  36. acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/layout.json +3 -0
  37. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_content_list.json +3 -0
  38. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_model.json +3 -0
  39. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_origin.pdf +3 -0
  40. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/full.md +415 -0
  41. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/images.zip +3 -0
  42. aconditionalnormalizingflowforacceleratedmulticoilmrimaging/layout.json +3 -0
  43. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_content_list.json +3 -0
  44. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_model.json +3 -0
  45. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_origin.pdf +3 -0
  46. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/full.md +0 -0
  47. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/images.zip +3 -0
  48. aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/layout.json +3 -0
  49. acoupledflowapproachtoimitationlearning/865a27f8-93e6-494a-8b17-c293ff834e3b_content_list.json +3 -0
  50. acoupledflowapproachtoimitationlearning/865a27f8-93e6-494a-8b17-c293ff834e3b_model.json +3 -0
2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_content_list.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2823f34a9c6d4e72b7b35af3f475f07eb728409916b7cbf9515e8bea0abaafa9
size 158949
2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_model.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f9d14c1c28b815ca0ef20fea1454aaf5c3e4755a670a0986d928c490bca8218
size 186302
2dshapleyaframeworkforfragmenteddatavaluation/60ac8018-66b7-4c21-ba56-8dfa341aa736_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:848f3462deaa8d533273859e0bf1a71891c9aae4dce7dde257815a76a50247d0
size 5428123
2dshapleyaframeworkforfragmenteddatavaluation/full.md ADDED
@@ -0,0 +1,851 @@
# 2D-Shapley: A Framework for Fragmented Data Valuation

Zhihong Liu $^{*1}$ Hoang Anh Just $^{*2}$ Xiangyu Chang $^{1}$ Xi Chen $^{3}$ Ruoxi Jia $^{2}$

# Abstract

Data valuation—quantifying the contribution of individual data sources to certain predictive behaviors of a model—is of great importance to enhancing the transparency of machine learning and designing incentive systems for data sharing. Existing work has focused on evaluating data sources with a shared feature or sample space. How to value fragmented data sources, each of which contains only partial features and samples, remains an open question. We start by presenting a method to calculate the counterfactual of removing a fragment from the aggregated data matrix. Based on the counterfactual calculation, we further propose 2D-Shapley, a theoretical framework for fragmented data valuation that uniquely satisfies some appealing axioms in the fragmented data context. 2D-Shapley empowers a range of new use cases, such as selecting useful data fragments, providing interpretation for sample-wise data values, and fine-grained data issue diagnosis.

# 1. Introduction

Data are essential ingredients for building machine learning (ML) applications. The ability to quantify and measure the value of data is crucial to the entire lifecycle of ML: from cleaning poor-quality samples and tracking important ones to be collected during data preparation, to setting proper priorities over samples during training, to interpreting why certain behaviors of a model emerge during deployment. Determining the value of data is also central to designing incentive systems for data sharing and implementing current policies on the monetization of personal data.

*Equal contribution. Code repository publicly available: https://github.com/ruoxi-jia-group/2dshapley. <sup>1</sup>Center for Intelligent Decision-Making and Machine Learning, Department of Information Systems and Intelligent Business, School of Management, Xi'an Jiaotong University, Xi'an, 710049, China. <sup>2</sup>Bradley Department of Electrical and Computer Engineering, Virginia Tech, Virginia, USA. <sup>3</sup>Department of Technology, Operations, and Statistics, Stern School of Business, New York University, New York, 10012, USA. Correspondence to: Xiangyu Chang <xiangyuchang@xjtu.edu.cn>, Xi Chen <xc13@stern.nyu.edu>, Ruoxi Jia <ruoxijia@vt.edu>.

Proceedings of the $40^{th}$ International Conference on Machine Learning, Honolulu, Hawaii, USA. PMLR 202, 2023. Copyright 2023 by the author(s).

![](images/c693a4c5b1e17c0883d736bff2aee209edc3863f255dd087d8703bfd76017c93.jpg)
(a) Horizontal Valuation

![](images/1ca114a6b20d4ca5547d1d4cd88245df2900d00edc2822f0e22a736fae4de2b8.jpg)
(b) Vertical Valuation

![](images/2cf66b551e22fc05fb6dbd7ee6b41309423519467f4f7128fc6b91ec3bfceea8.jpg)
(c) Fragmented Valuation

Figure 1: Illustration of different data valuation settings based on how the training set is partitioned among different data contributors.

The current literature on data valuation (Jia et al., 2019b; Ghorbani & Zou, 2019) has exclusively focused on valuing horizontally partitioned data—in other words, each data source to be valued shares the same feature space. How to value vertically partitioned data, where each data source provides a different feature but shares the same sample space, has been studied in the context of ML interpretability (Covert et al., 2020). However, none of these abstractions fully captures the complexity of real-world scenarios, where data sources can have non-overlapping features and samples (termed fragmented data sources hereinafter).

Example 1. Consider two banks, $B_{1}$ and $B_{2}$, and two e-commerce companies, $E_{1}$ and $E_{2}$, located in Regions 1 and 2. These four institutions are interested in collaboratively building an ML model to predict users' credit scores with their data. Due to the geographical difference, $B_{1}$ and $E_{1}$ have a different user group from $B_{2}$ and $E_{2}$. Also, due to the difference in business, $B_{1}$ and $B_{2}$ provide different features than what $E_{1}$ and $E_{2}$ can offer. Overall, the four institutions partition the aggregated data horizontally and vertically, as illustrated by Figure 1(c). How do we quantify each institution's contribution to the joint model training?

Example 2. Due to inevitable errors occurring during the data generation and collection processes, real-world data are seldom of high quality. Suppose that a data analyst is interested in identifying some potentially erroneous entries in a dataset. Existing horizontal and vertical data valuation tools can help locate the rows or columns that could contain errors by returning the ones with the lowest values. Nevertheless, can we perform more fine-grained detection—e.g., how do we pinpoint the coordinates of erroneous entries?

Example 3. Horizontal data valuation is now widely used to explain the importance of each sample to a learning outcome (Tang et al., 2021; Karlas et al., 2022). But how can a data analyst further explain these sample importance scores—why does a sample receive a certain importance score? Is a sample "low-quality" because it contains several "moderately low-quality" features or one "exceptionally low-quality" feature?

Answering the above questions calls for a quantitative understanding of how each block in the data matrix (e.g., a sub-matrix as in Ex. 1 or a single entry as in Ex. 2 and 3) contributes to the outcome of learning.

Technical Challenges. The problem of block valuation requires rethinking fundamental aspects of data valuation. Existing data valuation theory consists of two basic modules at a conceptual level: (1) Counterfactual Analysis, where one calculates how the utility of a subset of data sources would change after the source to be valued is removed; and (2) Fair Attribution, where a data source is valued based on a weighted average of its marginal utilities for different subsets, and the weights are set for the value to satisfy certain fairness properties. The fairness notion considered by past valuation schemes requires that permuting the order of different data sources does not change their value.

For horizontal and vertical valuation, the counterfactual can be calculated simply by taking the difference between the performance of a model trained on a subset of columns or rows and the performance with one column or row removed. However, it is unclear how to calculate the counterfactual when a block is excluded, because the remaining data matrix could be incomplete. Besides, the fairness notion of existing data value notions is no longer appropriate in the context of block valuation. As a concrete example to illustrate this point, consider Figure 1(c) and suppose the two blocks on the left provide temperature measurements as features and the ones on the right are humidity measurements. In this case, one should not expect the value to be unchanged when two blocks with different physical meanings (e.g., yellow and pink) are swapped.

Contributions. This paper presents the first focused study on data valuation without assuming a shared feature space or sample space. Toward that end, we make the following contributions.

- We present an approach that enables evaluation of the marginal contribution of a block within the data matrix to any other block with non-overlapping sample and feature spaces.
- We abstract the block valuation problem into a two-dimensional (2D) cooperative game, where the utility function is invariant to column permutations and row permutations but not to arbitrary entry permutations.
- We propose axioms that a proper valuation scheme should satisfy in the 2D game and show that the axioms lead to a unique representation of the value assignment (referred to as 2D-Shapley). In particular, this representation is a natural generalization of the Shapley value (Shapley, 1997)—a celebrated value attribution scheme widely used in data valuation among other applications.
- We demonstrate that 2D-Shapley enables new applications, including selecting useful data fragments, providing interpretation for sample-wise data values, and fine-grained data issue diagnosis.

# 2. Background and Related Work

In a typical setting, a set of data sources is used to learn an ML model, which achieves a certain performance score. The goal of data valuation is to quantify the contribution of each data source toward achieving that performance score. The definition of a data source depends on the context in which the data valuation results are utilized. For instance, when using data valuation to interpret how the global behavior of the ML model depends on individual samples or individual features, a sample or a feature in the training data is regarded as a data source; when using data valuation to inform the reward design for data sharing, the collection of all samples or all features contributed by the same entity is regarded as a data source.

Formally, let $N = \{1,\dots ,n\}$ denote the index set of $n$ training data sources. A data valuation scheme assigns a score to each training data source in a way that reflects its contribution. These scores are referred to as data values. To analyze a source's "contribution", we define a utility function $U:2^{N}\to \mathbb{R}$, which maps any subset of the data sources to a score indicating the usefulness of the subset. $2^{N}$ represents the power set of $N$, i.e., the set of all subsets of $N$, including the empty set and $N$ itself. For a classification task, a common choice for $U$ is the performance of a model trained on the input subset, i.e., $U(S) = \mathrm{acc}(\mathcal{A}(S))$, where $\mathcal{A}$ is a learning algorithm that takes a set $S\subseteq N$ of sources as input and returns a model, and acc is a metric function to evaluate the performance of a given model, e.g., the accuracy of the model on a hold-out validation set.
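A minimal sketch of such a utility $U(S)$, treating each training sample as one source; the dataset and model choices (scikit-learn's breast-cancer data, logistic regression) are illustrative and not the paper's specific setup.

```python
# Sketch of a subset utility U(S): train on the sources indexed by S,
# score on a hold-out validation set. Dataset and model are illustrative.
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=0)

def utility(S):
    """U(S) = accuracy on the validation set of a model trained on sources S."""
    S = sorted(S)
    if len(S) == 0 or len(set(y_tr[S])) < 2:
        # Degenerate subset: fall back to majority-class accuracy.
        return np.bincount(y_val).max() / len(y_val)
    model = LogisticRegression(max_iter=1000).fit(X_tr[S], y_tr[S])
    return model.score(X_val, y_val)

print(utility(range(50)))
```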
Past research has proposed various ways to characterize data values given the utility function, among which the Shapley value is arguably the most widely used scheme for data valuation. The Shapley value is defined as

$$
\psi_{i}^{1d}(U) := \frac{1}{n}\sum_{k=1}^{n}\binom{n-1}{k-1}^{-1}\sum_{\substack{S \subseteq N \setminus i \\ |S| = k-1}}\big[U(S \cup i) - U(S)\big]. \tag{1}
$$
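A brute-force sketch of Eq. (1) for tiny $n$; the toy utility is a hypothetical stand-in, and a real model-accuracy utility like the one above plugs in the same way.

```python
# Exact 1D-Shapley by enumerating all subsets (feasible only for small n).
from itertools import combinations
from math import comb

def toy_utility(S):
    # Hypothetical utility with diminishing returns in subset size.
    return len(S) ** 0.5

def shapley_1d(utility, n):
    values = []
    for i in range(n):
        rest = [j for j in range(n) if j != i]
        v = 0.0
        for k in range(1, n + 1):
            for S in combinations(rest, k - 1):
                v += (utility(set(S) | {i}) - utility(set(S))) / comb(n - 1, k - 1)
        values.append(v / n)
    return values

n = 4
vals = shapley_1d(toy_utility, n)
# Efficiency axiom: the values sum to U(N) (here U(empty set) = 0).
assert abs(sum(vals) - toy_utility(set(range(n)))) < 1e-9
```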
To differentiate it from the proposed work, we will refer to the Shapley value defined in Eq. (1) as 1D-Shapley. 1D-Shapley is popular due to its unique satisfaction of the following four axioms (Shapley, 1953):

- Dummy: if $U(S \cup i) = U(S) + c$ for any $S \subseteq N \setminus i$ and some $c \in \mathbb{R}$, then $\psi_i^{1d}(U) = c$.
- Symmetry: let $\pi : N \to N$ be any permutation of $N$ and $\pi U(S) \coloneqq U(\pi(S))$; then $\psi_{\pi(i)}^{1d}(\pi U) = \psi_i^{1d}(U)$.
- Linearity: for utility functions $U_{1}, U_{2}$ and any $\alpha_{1}, \alpha_{2} \in \mathbb{R}$, $\psi_{i}^{1d}(\alpha_{1}U_{1} + \alpha_{2}U_{2}) = \alpha_{1}\psi_{i}^{1d}(U_{1}) + \alpha_{2}\psi_{i}^{1d}(U_{2})$.
- Efficiency: for every $U$, $\sum_{i \in N} \psi_i^{1d}(U) = U(N)$.

The symmetry axiom embodies fairness. In particular, $\pi U$ arises upon reindexing the data sources $1,\ldots ,n$ with the indices $\pi (1),\dots ,\pi (n)$; the symmetry axiom states that the evaluation of a particular position should not depend on the indices of the data sources.

Although the Shapley value was justified through these axioms in prior literature, the necessity of each axiom depends on the actual use case of data valuation results. Recent literature has studied new data value notions obtained by relaxing some of the aforementioned axioms, enabling improvements in the accuracy of bad data identification (Kwon & Zou, 2022), robustness to learning stochasticity (Wang & Jia, 2023; Wu et al., 2022a), and computational efficiency (Yan & Procaccia, 2021). For instance, relaxing the efficiency axiom gives rise to semi-values (Kwon & Zou, 2022; Wang & Jia, 2023); relaxing the linearity axiom gives rise to least cores (Yan & Procaccia, 2021). This paper focuses on generalizing 1D-Shapley to block valuation. As we will expound on later, 1D-Shapley faces two limitations that prevent it from serving as a reasonable notion for blockwise values. Note that 1D-Shapley and the aforementioned relaxed notions share a similar structure: all of them are based on the marginal utility of a data source. Hence, our effort to generalize 1D-Shapley to new settings can be adapted to the other, more relaxed notions.

Another line of related work focuses on developing efficient algorithms for data valuation via Monte Carlo methods (Jia et al., 2019b; Lin et al., 2022), via surrogate utility functions such as $K$-nearest neighbors (Jia et al., 2019a), neural tangent kernels (Wu et al., 2022b), and distributional distance measures (Just et al., 2023; Tay et al., 2022), and via reinforcement learning (Yoon et al., 2020). These ideas can also benefit the efficient computation of the proposed 2D-Shapley. As a concrete example, this paper builds upon Monte Carlo simulation and surrogate model approaches to improve the efficiency of 2D-Shapley.

Beyond data valuation, 1D-Shapley has been extensively used to gain feature-based interpretability for black-box models locally and globally. The local interpretability methods (Lundberg & Lee, 2017; Strumbelj & Kononenko, 2010) focus on analyzing the relative importance of features for each input separately; therefore, the importance scores of features across different samples are not comparable. By contrast, our work allows the comparison of feature importance across different samples. The global interpretability methods (Covert et al., 2020), on the other hand, explain the model's behavior across the entire dataset; in the context of this paper, we consider them vertical data valuation. Compared to global interpretability methods, our work provides a more fine-grained valuation by associating each entry of a feature with an importance score. Our work improves the interpretability of the global feature importance score in the sense that it reveals an individual sample's contribution to the importance of a feature.

# 3. How to Value a Block?

This section starts by formulating the block valuation problem. Then, we discuss the challenges of using 1D-Shapley to tackle the block valuation problem in terms of both counterfactual analysis and fair attribution. Finally, we present our proposed framework for solving the block valuation problem.

# 3.1. Problem Formulation

Let $N = \{1,2,\dots ,n\}$ and $M = \{1,2,\ldots ,m\}$ index $n$ disjoint collections of samples and $m$ disjoint collections of features contributed by $nm$ sources (or blocks). Each data source can be labeled by $(i,j)$ for $i\in N$ and $j\in M$, where we call $i$ the sample-wise index and $j$ the feature-wise index. To measure the contribution of a data source, we need to define a utility function, which measures the usefulness of a subset of data sources. The utility function $h(S,F)$ takes two separate sets $S\subseteq N$ and $F\subseteq M$ as its variables and returns a real-valued score indicating the utility of $\{(i,j)\}_{i\in S,j\in F}$. Note that this paper focuses on valuing the relative importance of feature blocks; that is, we assume that each data contributor provides a block of features, and the aggregation of features is then annotated by a separate entity (e.g., a data labeling company) that does not share the profit generated from joint training. More formally, we define the utility function as follows:

$h(S,F)\coloneqq$ Performance of the model trained on the feature blocks $\{(i,j)\}_{i\in S,j\in F}$ after annotation.

One can potentially generalize our framework to jointly value feature and label blocks by redefining the utility function to be non-zero only when both features and labels are included in the input block, as in (Jia et al., 2019a; Yona et al., 2021), but an in-depth investigation is deferred to future work.

The benefit of this utility function definition is twofold. First, its two-dimensional index always corresponds to a data fragment with the same feature space for all samples inside. As a result, one can calculate the utility in a straightforward manner by training on the matrix and evaluating the corresponding performance. This is an essential advantage over the one-dimensional index utilized by 1D-Shapley, as will be exemplified later. Second, created this way, the utility function is invariant to permutations of the sample-wise indices in $S$ for any given $F$ and to permutations of the feature-wise indices in $F$ for any given $S$, but not to permutations of the sample-wise and feature-wise indices combined. This is a desirable property: for many data types in ML, such as tabular data, one would expect that swapping samples or swapping features does not change the model performance, yet swapping any two entries in the matrix may lead to arbitrary errors and thus alter the model performance significantly.
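A minimal sketch of such a 2D utility $h(S,F)$: train on the rows indexed by $S$, restricted to the feature columns indexed by $F$. As above, the dataset and model are illustrative assumptions.

```python
# Sketch of h(S, F): validation accuracy of a model trained on the sub-matrix
# formed by sample blocks S and feature blocks F (here, individual rows/columns).
import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
X_tr, X_val, y_tr, y_val = train_test_split(X, y, random_state=0)

def h(S, F):
    """h(S, F) = accuracy of a model trained on rows S and columns F."""
    S, F = sorted(S), sorted(F)
    if not S or not F or len(set(y_tr[S])) < 2:
        return np.bincount(y_val).max() / len(y_val)   # majority-class fallback
    model = LogisticRegression(max_iter=1000)
    model.fit(X_tr[np.ix_(S, F)], y_tr[S])            # sub-matrix training
    return model.score(X_val[:, F], y_val)
```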
Our goal is to assign a score to each block in $\{(i,j)\}_{i\in N,j\in M}$ that measures its contribution to the outcome of joint learning, $h(N,M)$.

# 3.2. A Naive Baseline: 1D-Shapley

One idea for tackling the block valuation problem is to flatten the indices of blocks into one dimension and leverage 1D-Shapley to value each block. Specifically, we can reindex $\{(i,j)\}_{i\in N,j\in M}$ by $T = \{1,\dots ,nm\}$. Note that this step discards the structural information contained in the two-dimensional indices. Then, one can utilize Eq. (1) to value each $i\in T$.

The second step of applying Eq. (1) requires calculating $U(S \cup i) - U(S)$ for any $S \subseteq T \setminus i$. Both $S$ and $S \cup i$ could correspond to a data fragment whose samples differ in their feature space (see the example in Figure 2); nevertheless, how to evaluate the utility of such a fragment is unclear. An ad hoc way of addressing this problem is to perform missing value imputation, e.g., filling in the missing values of a feature using the average of the feature values present.

In addition to the difficulty of evaluating the counterfactual, the symmetry axiom satisfied by 1D-Shapley no longer has the correct fairness interpretation when the input indices are flattened from 2D ones. In that case, the indices $1,\ldots ,nm$ carry specific meanings entailed by the original 2D structure; e.g., some indices might correspond to temperature features, and others might correspond to humidity. Hence, the symmetry axiom, which requires unchanged data values after permuting the data sources' indices, is neither sensible nor necessary, as the permutation might map the content of a data source from one meaning to an entirely different one.

We will use 1D-Shapley with missing value imputation as a baseline for our proposed approach. This simple baseline is still a useful benchmark to assess the extra (non-trivial) gains that our approach can attain in different application scenarios.

![](images/12a1dc255539215a9215558eb17177d30b3e857314ccfddf71ad990d93db3552.jpg)
Figure 2: A visualization of the 1D-Shapley marginal contribution applied to sample-feature valuation.

# 3.3. Our Approach: 2D-Shapley

Here, we describe 2D-Shapley as a principled framework for valuing data blocks. We emphasize how 2D-Shapley overcomes the challenges of the 1D-Shapley baseline in terms of (1) calculating the counterfactual and (2) framing the correct fairness principles, and we then derive the representation of the data values based on the new counterfactual analysis and principles. Finally, we present efficient algorithms to compute 2D-Shapley.

# 3.3.1. Two-Dimensional Counterfactual Analysis

Given a two-dimensional utility function $h(\cdot, \cdot)$, we define the marginal contribution of a block $(i,j)$ to the collection of blocks $\{(i,j)\}_{i \in S, j \in F}$ as

$$
M_h^{i,j}(S,F) := h(S \cup i, F \cup j) + h(S,F) - h(S \cup i, F) - h(S, F \cup j). \tag{2}
$$

The rationale behind the definition of $M_h^{i,j}(S,F)$ is illustrated by Figure 3. Starting from the area corresponding to $h(S\cup i,F\cup j)$, we subtract the two areas $(S\cup i,F)$ and $(S,F\cup j)$ and add back the area $(S,F)$, which was subtracted twice; the remaining area, marked "marginal" in Figure 3, corresponds to the marginal influence of the block $(i,j)$.

The unique advantage is that each individual utility is well-defined, as it takes as input a collection of blocks within which the samples all share the same feature space.
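A self-contained sketch of the 2D marginal contribution in Eq. (2), using a hypothetical additive utility so the result can be checked by hand; any $h(S,F)$ like the sketch above plugs in directly.

```python
# Eq. (2): the inclusion-exclusion marginal of block (i, j) over (S, F).
def h_toy(S, F):
    # Hypothetical utility: grows with the number of covered (row, column) blocks.
    return len(S) * len(F)

def marginal_2d(h, i, j, S, F):
    """M_h^{i,j}(S, F) from Eq. (2)."""
    Si, Fj = S | {i}, F | {j}
    return h(Si, Fj) + h(S, F) - h(Si, F) - h(S, Fj)

# For this toy utility, every block's marginal over any (S, F) is exactly 1:
# (s+1)(f+1) + s*f - (s+1)*f - s*(f+1) = 1.
assert marginal_2d(h_toy, 0, 0, {1, 2}, {1}) == 1
```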
![](images/2a211156d7b8689720eb6a33973efe29ee25053970365619ce13b74b86527e9c.jpg)
Figure 3: Removal process and marginal influence of $(i,j)$.

# 3.3.2. Axioms for Block Valuation

We start by redefining "dummy" for block valuation, where the underlying utility function is 2D.

Definition 3.1. (2D-Dummy) We call a block $(i,j)$ a 2D-dummy under utility function $h$ if, for all $S\subseteq N\setminus i$ and $F\subseteq M\setminus j$,

$$
M_h^{i,j}(S,F) = c, \quad c \in \mathbb{R}. \tag{3}
$$

2D-dummy is implied by the canonical (one-dimensional) dummy mentioned in Section 2. Specifically, if sample $i$ is a sample dummy satisfying $h(S \cup i, F) = h(S, F) + c_1$ and $h(S \cup i, F \cup j) = h(S, F \cup j) + c_2$ for $S \subseteq N \setminus i, F \subseteq M \setminus j$, like the dummy defined for 1D-Shapley, then Eq. (3) is satisfied with $c := c_2 - c_1$. Similarly, if feature $j$ is a feature dummy satisfying $h(S, F \cup j) = h(S, F) + c_1'$ and $h(S \cup i, F \cup j) = h(S \cup i, F) + c_2'$ for $S \subseteq N \setminus i, F \subseteq M \setminus j$, then Eq. (3) is also satisfied with $c := c_2' - c_1'$. However, Eq. (3) does not imply that sample $i$ is a sample dummy or that feature $j$ is a feature dummy.

We first define the set $G$ of all possible utility functions, define a value function $\psi : G \to \mathbb{R}^{n \times m}$, and denote the value of block $(i,j)$ by $\psi_{ij}(h)$, the $(i,j)$-th element of the matrix $\psi(h)$. In order to build an equitable evaluation system, we propose the following axioms.

Axiom 1. (2D-Linearity) For any two utility functions $h_1, h_2 \in G$ and any $\beta_1, \beta_2 \in \mathbb{R}$,

$$
\psi_{ij}(\beta_1 h_1 + \beta_2 h_2) = \beta_1 \psi_{ij}(h_1) + \beta_2 \psi_{ij}(h_2). \tag{4}
$$

Axiom 2. (2D-Dummy) If the block $(i,j)$ is a 2D-dummy of $h$, i.e., it satisfies Eq. (3), then $\psi_{ij}(h) = c$.

Axiom 3. (2D-Symmetry) Let $\pi_1: N \to N$ and $\pi_2: M \to M$ be two permutations; then

$$
\psi_{\pi_1(i)\pi_2(j)}[(\pi_1\pi_2)h] = \psi_{ij}(h), \tag{5}
$$

where for all $S\subseteq N, F\subseteq M$,

$$
[(\pi_1\pi_2)h](S,F) := [(\pi_2\pi_1)h](S,F) := h(\pi_1(S), \pi_2(F)). \tag{6}
$$

Axiom 4. (2D-Efficiency) For every utility function $h \in G$,

$$
\sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h) = h(N,M). \tag{7}
$$

Let us discuss the rationale behind the four axioms.

The 2D-linearity axiom is inherited from 1D-Shapley; it implies that the value of the $(i,j)$-th block under the sum of two ML performance measures is the sum of its values under each performance measure.

The 2D-dummy axiom can be interpreted by taking $c = 0$: if a block contributes nothing to the ML task in any situation (i.e., for any $S \subseteq N \setminus i$ and $F \subseteq M \setminus j$), then its value is zero.

In the 2D-symmetry axiom, the rows and columns are permuted independently. As a result, the entries from the same feature always remain in the same column. The axiom states that such permutations do not change the values of individual data blocks, which is what we would expect in many ML applications. In Appendix A, we prove that Axiom 3 is implied by this interpretation.

The 2D-efficiency axiom is inherited from 1D-Shapley, requiring that the sum of the values of all the data blocks equal the performance of the whole dataset.

Based on the axioms, we provide a definition:

Definition 3.2. The value $\psi_{ij}(h)$ with respect to the utility function $h$ is a two-dimensional Shapley value (2D-Shapley for short), denoted $\psi_{ij}^{2d}$, if $\psi_{ij}$ satisfies the 2D-linearity, 2D-dummy, 2D-symmetry, and 2D-efficiency axioms.

2D-Shapley can be seen as the two-dimensional extension of the Shapley value, inheriting its advantages, with a natural adaptation of the dummy and symmetry axioms to the two-dimensional utility function scenario.

# 3.3.3. Representation Theory

We will show that there exists an analytic and unique solution for 2D-Shapley.

Theorem 3.3. (Representation Theory of 2D-Shapley) $\psi_{ij}^{2d}$ has the unique solution

$$
\psi_{ij}^{2d} = \frac{1}{nm}\sum_{s=1}^{n}\sum_{f=1}^{m}\Delta_{sf}, \tag{8}
$$

where, for $i\in N$ and $j\in M$,

$$
\Delta_{sf} = \frac{1}{\binom{n-1}{s-1}\binom{m-1}{f-1}}\sum_{(S,F)\in D_{sf}^{ij}} M_h^{i,j}(S,F), \tag{9}
$$

$D_{sf}^{ij} = \{(S,F): S\subseteq N\setminus i, F\subseteq M\setminus j, |S| = s-1, |F| = f-1\}$, and $M_h^{i,j}(S,F)$ is defined in Eq. (2).

Theorem 3.3 indicates that $\psi_{ij}^{2d}$ is a weighted average of the two-dimensional counterfactual in Eq. (2). Theorem 3.3 is referred to as the representation theory of 2D-Shapley because the proof procedure shows that $\psi_{ij}^{2d}$ has a basis expansion formulation (see Eq. (15) in Appendix B). To show the basis expansion, a series of basic utility functions in $G$ needs to be defined (e.g., Eq. (13)). Compared with the representation theory of 1D-Shapley by Roth (1988), one technical challenge is to define the basis and basic utility functions for the 2D case to handle the 2D counterfactual. Furthermore, the proof of the uniqueness of 2D-Shapley has to solve a complex high-dimensional linear system (see Eq. (19) in Appendix B). Our proof incorporates new techniques, unseen in the classic proof of 1D-Shapley, to deal with these unique technical challenges arising in the 2D context.

Moreover, the representation theory also implies that 2D-Shapley can be reduced to 1D-Shapley. The following corollary shows that summing the block values over all rows gives the 1D-Shapley values of features, and summing the block values over all columns gives the 1D-Shapley values of samples. Corollary 3.4 not only indicates that 2D-Shapley is a natural generalization of 1D-Shapley, but is also useful for discussing the experimental results on how 2D values can explain 1D values (see Subsection 4.1).

Corollary 3.4. For any $h\in G$, let $\psi_{i\cdot}^{1d}(h)\coloneqq \sum_{j\in M}\psi_{ij}^{2d}(h)$ and $\psi_{\cdot j}^{1d}(h)\coloneqq \sum_{i\in N}\psi_{ij}^{2d}(h)$; then

$$
\psi_{i\cdot}^{1d}(h) = \frac{1}{n}\sum_{S \subseteq N \setminus i}\frac{1}{\binom{n-1}{|S|}}\big[h(S\cup i, M) - h(S, M)\big], \tag{10}
$$

and

$$
\psi_{\cdot j}^{1d}(h) = \frac{1}{m}\sum_{F \subseteq M \setminus j}\frac{1}{\binom{m-1}{|F|}}\big[h(N, F\cup j) - h(N, F)\big], \tag{11}
$$

which are in the form of 1D-Shapley.
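A brute-force sketch of Eq. (8) and (9) for tiny $n, m$, with numeric checks of 2D-efficiency and Corollary 3.4. The toy utility, which vanishes whenever the row or column set is empty, is a hypothetical stand-in for a real $h(S,F)$.

```python
# Exact 2D-Shapley by enumerating all (S, F) pairs (feasible only for tiny n, m).
from itertools import combinations
from math import comb

n, m = 3, 2

def h_toy(S, F):
    # Hypothetical nonlinear utility over blocks; zero on empty row/column sets.
    return (len(S) * len(F)) ** 0.5

def shapley_2d(h, n, m):
    psi = [[0.0] * m for _ in range(n)]
    for i in range(n):
        for j in range(m):
            rows = [r for r in range(n) if r != i]
            cols = [c for c in range(m) if c != j]
            for s in range(1, n + 1):
                for f in range(1, m + 1):
                    w = 1.0 / (comb(n - 1, s - 1) * comb(m - 1, f - 1))
                    for S in combinations(rows, s - 1):
                        for F in combinations(cols, f - 1):
                            S, F = set(S), set(F)
                            marg = (h(S | {i}, F | {j}) + h(S, F)
                                    - h(S | {i}, F) - h(S, F | {j}))   # Eq. (2)
                            psi[i][j] += w * marg
            psi[i][j] /= n * m
    return psi

psi = shapley_2d(h_toy, n, m)
# 2D-efficiency: the values sum to h(N, M) (using h = 0 on empty sets).
total = sum(sum(row) for row in psi)
assert abs(total - h_toy(set(range(n)), set(range(m)))) < 1e-9

# Corollary 3.4: row sums of psi match the 1D-Shapley of sample blocks
# computed with all features present (again relying on h(., empty) = 0).
def shapley_rows(h, n, m):
    M_full, vals = set(range(m)), []
    for i in range(n):
        rest = [r for r in range(n) if r != i]
        v = 0.0
        for s in range(0, n):
            for S in combinations(rest, s):
                v += (h(set(S) | {i}, M_full) - h(set(S), M_full)) / comb(n - 1, s)
        vals.append(v / n)
    return vals

for i, v in enumerate(shapley_rows(h_toy, n, m)):
    assert abs(v - sum(psi[i])) < 1e-9
```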
Finally, having the analytical expression Eq. (8) of 2D-Shapley at hand provides great convenience in designing efficient algorithms.

# 3.3.4. Efficient Algorithm

The computational complexity of exactly calculating 2D-Shapley is exponential in $mn$ due to the summation over all possible subsets of rows and columns. To overcome this challenge, we develop a Monte Carlo approach to approximating 2D-Shapley. The key idea is that 2D-Shapley can be rewritten as an expectation, over random permutations of rows and columns, of the marginal contribution of $(i,j)$ to the blocks indexed by the row indices preceding $i$ and the column indices preceding $j$. As a result, we can approximate 2D-Shapley by averaging over randomly sampled row and column permutations. We also design the algorithm so that utility function evaluations can be reused across different permutations, which gives rise to significant efficiency gains. The full details of the algorithm design are provided in Appendix E, and the pseudo-code is shown in Algorithm 1.
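A minimal sketch of the permutation-sampling idea; the paper's Algorithm 1 additionally reuses utility evaluations across permutations, which is omitted here for clarity.

```python
# Monte Carlo 2D-Shapley: average the Eq. (2) marginal of each block (i, j)
# over its permutation prefixes, for randomly sampled row/column permutations.
import random

def shapley_2d_mc(h, n, m, n_perms=200, seed=0):
    rng = random.Random(seed)
    psi = [[0.0] * m for _ in range(n)]
    for _ in range(n_perms):
        pi1 = list(range(n)); rng.shuffle(pi1)    # random row permutation
        pi2 = list(range(m)); rng.shuffle(pi2)    # random column permutation
        for a, i in enumerate(pi1):
            P_i = set(pi1[:a])                    # rows preceding i
            for b, j in enumerate(pi2):
                P_j = set(pi2[:b])                # columns preceding j
                psi[i][j] += (h(P_i | {i}, P_j | {j}) + h(P_i, P_j)
                              - h(P_i | {i}, P_j) - h(P_i, P_j | {j}))
    return [[v / n_perms for v in row] for row in psi]

# Any 2D utility plugs in; here the toy from the previous sketch.
est = shapley_2d_mc(lambda S, F: (len(S) * len(F)) ** 0.5, 3, 2)
```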
Evaluating the utility function requires retraining a model. For small-scale datasets, it may be possible to evaluate the utility function many times within a reasonable period, but for large-scale datasets, even a single evaluation may take days to finish. This would render our method impractical for many applications. Nonetheless, we can obviate model training entirely by using $K$-nearest neighbors (KNN) as a surrogate model. KNN-surrogate-based data valuation has shown great computational advantages while providing effective data quality identification (Jia et al., 2019a). In this work, we leverage a similar idea to reduce the computational complexity of 2D-Shapley for large models. First, observe from Eq. (8) and Corollary D.1 that, after rearranging the inner terms, we have

$$
\psi_{ij}^{2d} = \frac{1}{n!\,m!}\sum_{\substack{\pi_1\in\Pi(N)\\ \pi_2\in\Pi(M)}}\left[h\left(P_i^{\pi_1}\cup i, P_j^{\pi_2}\cup j\right) - h\left(P_i^{\pi_1}, P_j^{\pi_2}\cup j\right)\right] - \left[h\left(P_i^{\pi_1}\cup i, P_j^{\pi_2}\right) - h\left(P_i^{\pi_1}, P_j^{\pi_2}\right)\right], \tag{12}
$$

where $\Pi(X)$ is the set of all permutations of $X$, $\pi \in \Pi(X)$ is a permutation of $X$, and $P_i^{\pi}$ is the set of elements preceding $i$ in $\pi$. The expression in the first bracket is the 1D marginal contribution of sample $i$ and is valid since both utilities are trained on the same features, $P_j^{\pi_2} \cup j$. Similarly, the second bracket also represents a valid 1D marginal contribution of sample $i$, but with the features $P_j^{\pi_2}$. From this observation, we can apply the results for the 1D-Shapley value approximated with nearest neighbors, $\phi^{\mathrm{KNN}}$, defined recursively in Theorem 1 of (Jia et al., 2019a), and the 2D-Shapley under KNN surrogates can therefore be expressed as

$$
\psi_{ij}^{2d\text{-}\mathrm{KNN}} = \frac{1}{m!}\sum_{\pi_2\in\Pi(M)}\left[\phi^{\mathrm{KNN}}(i, P_j^{\pi_2}\cup j) - \phi^{\mathrm{KNN}}(i, P_j^{\pi_2})\right].
$$

This new formulation is efficient as it requires no further model training and removes the sum over all possible permutations of samples. We can further approximate the sum over all possible feature permutations with an average over sampled permutations. Our final complexity becomes $\mathcal{O}(PT|M||N|^2\log |N|)$, where $P$ is the number of sampled feature permutations, $T$ is the number of test points used for evaluating model performance, and $|N|, |M|$ are the cardinalities of $N$ and $M$, respectively; the pseudo-code for the overall KNN-based approximation is provided in Algorithm 2.
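A sketch of the KNN-surrogate route under simplifying assumptions: a single validation point, Euclidean distance, and $\phi^{\mathrm{KNN}}(i,\varnothing) := 0$ for the empty feature set (a convention we introduce here, not taken from the paper). The inner recursion follows Theorem 1 of Jia et al. (2019a) for unweighted KNN.

```python
# 2D-Shapley with a KNN surrogate: evaluate the recursive 1D KNN-Shapley on
# feature prefixes of sampled column permutations (per the formula above).
import numpy as np

def knn_shapley(X_tr, y_tr, x_val, y_val, K=5):
    """1D KNN-Shapley of each training sample for one validation point."""
    n = len(y_tr)
    order = np.argsort(((X_tr - x_val) ** 2).sum(axis=1))  # closest first
    match = (y_tr[order] == y_val).astype(float)
    s = np.zeros(n)
    s[n - 1] = match[n - 1] / n
    for k in range(n - 2, -1, -1):   # recursion from farthest to closest
        s[k] = s[k + 1] + (match[k] - match[k + 1]) / K * min(K, k + 1) / (k + 1)
    out = np.zeros(n)
    out[order] = s                   # map back to original sample indices
    return out

def shapley_2d_knn(X_tr, y_tr, x_val, y_val, n_perms=30, K=5, seed=0):
    """Average phi(i, prefix + j) - phi(i, prefix) over column permutations."""
    rng = np.random.default_rng(seed)
    n, m = X_tr.shape
    psi = np.zeros((n, m))
    for _ in range(n_perms):
        pi2 = rng.permutation(m)
        prev = np.zeros(n)           # assumed phi for the empty feature set
        for b, j in enumerate(pi2):
            F = pi2[:b + 1]
            cur = knn_shapley(X_tr[:, F], y_tr, x_val[F], y_val, K)
            psi[:, j] += cur - prev
            prev = cur
    return psi / n_perms
```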
# 4. Experiments

This section covers the two general application scenarios of 2D-Shapley. (1) Cell valuation, where each cell in the training data matrix is considered a data source and receives a score indicating its contribution to a learning task performed on the matrix. We mainly demonstrate this application scenario's benefits for fine-grained data debugging and for interpreting canonical sample-wise or feature-wise data values. (2) Sub-matrix valuation, where a sub-matrix containing multiple cells is considered a data source and receives a joint score. This scenario is closely related to data marketplaces, where each entity provides a dataset that appears as a sub-matrix in the aggregated data. Details about datasets, models, implementations, and ablation studies on the budget of inserted outliers are provided in Appendix F.

# 4.1. Cell Valuation

Sanity check of cell-wise values. We first check whether the cell-wise values produced by our method make sense via the data removal experiments commonly used in the data valuation literature. Specifically, we would expect that removing the cells with the highest values from the training set leads to the most significant performance degradation; conversely, removing the cells with the lowest values should barely affect the model performance. To evaluate the model performance after removal, we "remove" a cell by refilling its content with the average of all other cells in the same feature column. In the previous section, we presented two algorithms to calculate 2D-Shapley. We label the values obtained from the Monte Carlo-based method as 2D-Shapley-MC and the ones from the KNN-surrogate-based method as 2D-Shapley-KNN.

1D-Shapley and random removal are used as our baselines. In particular, 1D-Shapley is estimated by the permutation sampling described in (Jia et al., 2019b). For each baseline, we remove a number of cells at a time based on their sample-feature value ranking, in either descending or ascending order; we then train a model on the reduced dataset and evaluate the model performance, as sketched below.
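A sketch of that removal experiment: mask the $k$ lowest- (or highest-) valued cells by column-mean imputation, retrain, and record accuracy. The `values` array, model, and data names are illustrative assumptions; `values` is an $(n, m)$ array of cell values from any of the methods above.

```python
# Removal curve: impute top/bottom-ranked cells, retrain, measure accuracy.
import numpy as np
from sklearn.linear_model import LogisticRegression

def removal_curve(X_tr, y_tr, X_val, y_val, values, steps, ascending=True):
    order = np.argsort(values, axis=None)        # flat cell indices, low to high
    if not ascending:
        order = order[::-1]
    col_means = X_tr.mean(axis=0)
    accs = []
    for k in steps:
        Xk = X_tr.copy()
        rows, cols = np.unravel_index(order[:k], values.shape)
        Xk[rows, cols] = col_means[cols]         # "remove" cells by imputation
        model = LogisticRegression(max_iter=1000).fit(Xk, y_tr)
        accs.append(model.score(X_val, y_val))
    return accs
```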
As shown in Figure 4, when removing cells in ascending value order, 2D-Shapley not only maintains the model performance but also improves it by at least $2\%$ on the Census, Credit, and Breast Cancer datasets, whereas 1D-Shapley degrades the model performance earlier than 2D-Shapley on all three datasets. Considering removal from the highest-valued cells, we observe that 2D-Shapley can effectively detect contributing cells, and removing these cells causes the model performance to drop quickly. By contrast, removing cells according to 1D-Shapley is close to random removal. These results indicate that 2D-Shapley is more effective than 1D-Shapley at recognizing the contribution of cells and can better inform strategic data harnessing in ML.

Fine-Grained Outlier Localization. Existing horizontal data valuation methods have demonstrated promising results in detecting abnormal samples (Ghorbani & Zou, 2019; Kwon & Zou, 2022; Wang & Jia, 2023) by finding the lowest-valued samples. However, it is rarely the case that every cell in a sample is abnormal. For instance, one type of error in the Census data is "198x→189x", where the years of birth are wrongly specified; this error could appear in a single feature column and, at the same time, affect only the samples (or users) born in 198x. Existing horizontal valuation remains limited in localizing such erroneous entries.

To demonstrate the potential of 2D-Shapley for fine-grained entry-wise outlier detection, we first inject outlier cells into the clean Breast Cancer dataset. Following a recent outlier generation technique (Du et al., 2022), we inject low-probability-density values into the dataset as outlier cells. We explain the outlier injection method in detail in Appendix F.3. We randomly place outlier cells in $2\%$ of all cells (50 cells). Afterward, we compute 2D-Shapley-KNN for each cell in the dataset with the inserted outliers, shown in Figure 10. Since we expect outliers not to be helpful for the model performance, the values of outlier cells should be low. Therefore, we sort the 2D-Shapley cell values in ascending order and prioritize human inspection toward the ones with the lowest values. We show the detection rate of the inserted outliers in Figure 5A). As we can see, with 2D-Shapley values we can detect $90\%$ of the inserted outliers within the first $5\%$ of all cells. By contrast, based on the values produced by 1D-Shapley, one would need to inspect over $90\%$ of the cells to screen out all the outlier cells.

![](images/1567d9121fd66969f4dbde7557231e9198e133750bbdcf880ca28b8ae9044731.jpg)
Figure 4: Performance comparison between 2D-Shapley and baselines on various use cases.

![](images/fcc1fd6a61b06a8bf682f7a8ed6d09ca444dfc4ef7d7098bd42c91f003ee510c.jpg)
Figure 5: A) Detection of the inserted outliers in the Breast Cancer dataset. B) Detection of the inserted outliers in the Age category of the Census dataset.

![](images/1692fecfd07bc2f1bd43fbccb46d11babd5bb4024df51d6248e23a4fe42aff5a.jpg)
Figure 6: 2D-Shapley vs. model performance on various dataset splits.

![](images/c9eaf718075e22690bd3704e26fd199378d7b8f10a12c57f7b5cb969815ce1fd.jpg)
Figure 7: Cell values of samples with similar 1D values in the Breast Cancer dataset.
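A sketch of the detection-rate curve in Figure 5: inspect cells from the lowest value upward and track the fraction of injected outliers found. `values` and the boolean `outlier_mask` (same shape) are illustrative names.

```python
# Detection rate as a function of the number of inspected cells.
import numpy as np

def detection_curve(values, outlier_mask):
    order = np.argsort(values, axis=None)        # ascending cell values
    found = outlier_mask.ravel()[order].cumsum() # outliers found so far
    return found / outlier_mask.sum()            # rate[k]: after k+1 inspections
```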
We further examine a practical case of outliers caused by human error, where cells have been incorrectly typed, e.g., "18" became "81". In the Census dataset, for the feature "Age", we randomly swap 15 cells between "17" and "71", "18" and "81", and "19" and "91". As before, we sort the values of all cells in the dataset in ascending order. As we observe in Figure 5B), detection with 2D-Shapley outperforms 1D-Shapley. In particular, with 2D-Shapley we can detect $80\%$ of the added outliers with fewer than 1800 inspected cells, while 1D-Shapley requires four times as many cells to achieve a comparable rate. The 1D-Shapley and 2D-Shapley heatmaps are provided in the Appendix. The results above demonstrate the effectiveness of 2D-Shapley in locating outlier cells in a dataset.

Enabling Interpretation of 1D Valuation Results. Apart from outlier detection, 2D-Shapley also brings new insights into horizontal sample valuation and vertical feature valuation, which we refer to as 1D valuation. For instance, 1D sample valuation produces an importance score for each sample, but we lack a deeper understanding of why a sample receives a certain value.

Recall from Corollary 3.4 that summing 2D-Shapley over rows or columns gives 1D feature values and 1D sample values, respectively. Hence, 2D-Shapley allows one to interpret the 1D value of a sample by further breaking it down into the contributions of the different features in that sample. That is, 2D-Shapley gives insight into the relative importance of a sample's features to the valuation result received by the sample. For example, in Figure 7A), we observe that two samples have similar 1D values, and their cell values are also close. However, in Figure 7B), we observe a contrasting case: although both samples have close 1D values, their cell values are completely unrelated. More detailed results can be found in Appendix F.3.

# 4.2. Sub-matrix Valuation

We turn to the application of 2D-Shapley to informing dataset pricing in the data marketplace. 2D-Shapley enables a principled method to value fragmented data sources, as illustrated in Figure 1(c), where each source is a sub-matrix of the aggregated training data matrix. A reasonable measure of a source's value should reflect its usefulness for ML. Hence, to verify the significance of the resulting values for sub-matrix valuation, we measure the performance of a model trained on a source and examine the correlation between the source's value and that performance. For this experiment, we use the Credit dataset with sources contributing fragmented data and consider multiple random splits of the dataset. The results are provided in Figure 6, where each line corresponds to a different split of the aggregate data into individual sources. Figure 6 shows that as the performance of the model trained on a block increases, the block's 2D-Shapley value also increases.
# 5. Conclusion

This work aims to set the theoretical foundation for more realistic data valuation application scenarios. In particular, we investigate the block valuation problem and present 2D-Shapley, a new data value notion suited to solving this problem. 2D-Shapley empowers a range of new use cases, such as informing the pricing of fragmented data, strategic data selection at a fine-grained scale, and interpreting 1D valuation results. Our work opens up many new avenues for future investigation. First, we can immediately adapt our proof technique to prove two-dimensional generalizations of other typical data value notions (Kwon & Zou, 2022; Wang & Jia, 2023). Second, it would be interesting to build upon our framework to evaluate irregularly shaped data sources (Fang et al., 2019) and to incorporate label information for joint valuation in a principled way.

# Acknowledgements

Xiangyu Chang's work was partly supported by the National Natural Science Foundation for Outstanding Young Scholars of China under Grant 72122018 and partly by the Natural Science Foundation of Shaanxi Province under Grant 2021JC-01. Xi Chen would like to thank the NSF for its support via Grant IIS-1845444.

# References
Covert, I., Lundberg, S. M., and Lee, S.-I. Understanding global feature contributions with additive importance measures. Advances in Neural Information Processing Systems, 33:17212-17223, 2020.

Du, X., Wang, Z., Cai, M., and Li, Y. VOS: Learning what you don't know by virtual outlier synthesis. arXiv preprint arXiv:2202.01197, 2022.

Dua, D. and Graff, C. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml.

Fang, F., Lan, W., Tong, J., and Shao, J. Model averaging for prediction with fragmentary data. Journal of Business & Economic Statistics, 37(3):517-527, 2019.

Ghorbani, A. and Zou, J. Data Shapley: Equitable valuation of data for machine learning. In International Conference on Machine Learning, pp. 2242-2251. PMLR, 2019.

Jia, R., Dao, D., Wang, B., Hubis, F. A., Gürel, N. M., Li, B., Zhang, C., Spanos, C. J., and Song, D. Efficient task-specific data valuation for nearest neighbor algorithms. Proceedings of the VLDB Endowment, 12(11):1610-1623, 2019a.

Jia, R., Dao, D., Wang, B., Hubis, F. A., Hynes, N., Gurel, N. M., Li, B., Zhang, C., Song, D., and Spanos, C. J. Towards efficient data valuation based on the Shapley value. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 1167-1176. PMLR, 2019b.

Just, H. A., Kang, F., Wang, J. T., Zeng, Y., Ko, M., Jin, M., and Jia, R. LAVA: Data valuation without pre-specified learning algorithms. In International Conference on Learning Representations, 2023.

Karlas, B., Dao, D., Interlandi, M., Li, B., Schelter, S., Wu, W., and Zhang, C. Data debugging with Shapley importance over end-to-end machine learning pipelines. arXiv preprint arXiv:2204.11131, 2022.

Kwon, Y. and Zou, J. Beta Shapley: a unified and noise-reduced data valuation framework for machine learning. In International Conference on Artificial Intelligence and Statistics, pp. 8780-8802. PMLR, 2022.

Lin, J., Zhang, A., Lécuyer, M., Li, J., Panda, A., and Sen, S. Measuring the effect of training data on deep learning predictions via randomized experiments. In International Conference on Machine Learning, pp. 13468-13504. PMLR, 2022.

Lundberg, S. M. and Lee, S.-I. A unified approach to interpreting model predictions. Advances in Neural Information Processing Systems, 30:4768-4777, 2017.

Roth, A. E. The Shapley value: essays in honor of Lloyd S. Shapley. Cambridge University Press, 1988.

Shapley, L. S. A value for n-person games. Contributions to the Theory of Games, 2(28):307-317, 1953.

Shapley, L. S. A value for n-person games. Classics in Game Theory, 69, 1997.

Strumbelj, E. and Kononenko, I. An efficient explanation of individual classifications using game theory. Journal of Machine Learning Research, 11:1-18, 2010.

Tang, S., Ghorbani, A., Yamashita, R., Rehman, S., Dunnmon, J. A., Zou, J., and Rubin, D. L. Data valuation for medical imaging using Shapley value and application to a large-scale chest X-ray dataset. Scientific Reports, 11(1):1-9, 2021.

Tay, S. S., Xu, X., Foo, C. S., and Low, B. K. H. Incentivizing collaboration in machine learning via synthetic data rewards. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 36, pp. 9448-9456, 2022.

Wang, J. T. and Jia, R. A robust data valuation framework for machine learning. In International Conference on Artificial Intelligence and Statistics. PMLR, 2023.

Wang, T. and Jia, R. Data Banzhaf: A data valuation framework with maximal robustness to learning stochasticity. arXiv preprint arXiv:2205.15466, 2022.

Wu, M., Jia, R., Huang, W., Chang, X., et al. Robust data valuation via variance reduced data Shapley. arXiv preprint arXiv:2210.16835, 2022a.

Wu, Z., Shu, Y., and Low, B. K. H. DAVINZ: Data valuation using deep neural networks at initialization. In International Conference on Machine Learning, pp. 24150-24176. PMLR, 2022b.

Yan, T. and Procaccia, A. D. If you like Shapley then you'll love the core. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pp. 5751-5759, 2021.

Yona, G., Ghorbani, A., and Zou, J. Who's responsible? Jointly quantifying the contribution of the learning algorithm and data. In Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society, pp. 1034-1041, 2021.

Yoon, J., Arik, S., and Pfister, T. Data valuation using reinforcement learning. In International Conference on Machine Learning, pp. 10842-10851. PMLR, 2020.
332
+ # 2D-Shapley: A Framework for Fragmented Data Valuation Supplementary Materials
333
+
334
+ A. Proof of the fact that Axiom 3 is implied by its explanation
335
+
336
+ The explanation above is: for $i_1, i_2 \in N$ , $j_1, j_2 \in M$ , if for any $S \subseteq N \setminus \{i_1, i_2\}$ and $F \subseteq M$ , $h(S \cup i_1, F) = h(S \cup i_2, F)$ , and for any $S \subseteq N$ and $F \subseteq M \setminus \{j_1, j_2\}$ , $h(S, F \cup j_1) = h(S, F \cup j_2)$ , then $\psi_{i_1j_1}(h) = \psi_{i_2j_2}(h)$ .
337
+
338
+ For the proof, we prove in three steps that the explanation is equivalent to Axiom 3. Note that we should assume Axiom 1, 2 and 4 already exist. For simplicity, we use the lowercase letter to denote the cardinality of a set, for example, $|S| = s$ .
339
+
340
+ We want to prove the following proposition.
341
+
342
+ Proposition A.1. If Axiom 1, 2 and 4 exist, then Axiom 3 is equivalent to its explanation.
343
+
344
+ Proof. For the direction that Axiom 3 is implied by its explanation, we prove in three steps.
345
+
346
+ - Step 1: Define a utility function $h_{S,F}$ :
347
+
348
+ $$
349
+ h _ {S, F} \left(W _ {1}, W _ {2}\right) = \left\{ \begin{array}{l l} 1, i f S \subseteq W _ {1}, F \subseteq W _ {2}. \\ 0, o t h e r w i s e. \end{array} \right. \tag {13}
350
+ $$
351
+
352
+ For fixed $S \subseteq N$ , $F \subseteq M$ and $i_1, i_2 \in S$ , $j_1, j_2 \in F$ and for all $W_1 \subseteq N \setminus \{i_1, i_2\}$ , $W_2 \subseteq M \setminus \{j_1, j_2\}$ , $M_{h_{S,F}}^{i_1,j_1}(W_1, W_2) = M_{h_{S,F}}^{i_2,j_2}(W_1, W_2)$ . It leads to the conclusion that $\psi_{i_1j_1}(h_{S,F}) = \psi_{i_2j_2}(h_{S,F})$ according to the explanation.
353
+
354
+ For $i^* \notin S$ , $j \in M$ (or $j^* \notin F$ , $i \in N$ ) and $W_1 \subseteq N \setminus i^*$ , $W_2 \subseteq M \setminus j$ , ( $W_1 \subseteq N \setminus i$ , $W_2 \subseteq M \setminus j^*$ ), $M_{h_{S,F}}^{i^*,j}(W_1, W_2) = 0$ . ( $M_{h_{S,F}}^{i,j^*}(W_1, W_2) = 0$ .) It leads to the conclusion that $\psi_{i^*j}(h_{S,F}) = 0$ , $\forall j \in M$ ( $\psi_{ij^*}(h_{S,F}) = 0$ , $\forall i \in N$ ) according to Axiom 2.
355
+
356
+ In summary, we have conclusion that the values $\psi_{ij}$ are the same when $i\in S,j\in F$ , and otherwise zero. According to Axiom 4,
357
+
358
+ $$
359
+ 1 = h_{S,F}(N,M) = \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h_{S,F}) = \sum_{\substack{i\in S\\ j\in F}}\psi_{ij}(h_{S,F}).
360
+ $$
361
+
362
+ Hence $\psi_{ij}(h_{S,F}) = 1 / (sf)$ , where $i \in S, j \in F$ .
363
+
364
+ - Step 2: We prove a lemma that gives another representation of a utility $h$ in terms of the $h_{S,F}$ defined above.
365
+
366
+ Lemma A.2.
367
+
368
+ $$
369
+ h = \sum_{\substack{S\subseteq N\\ F\subseteq M}} C_{S,F}(h)\, h_{S,F},
370
+ $$
371
+
372
+ where $C_{S,F}(h) = \sum_{\substack{S' \subseteq S \\ F' \subseteq F}} (-1)^{s + f - s' - f'} h(S',F')$ .
373
+
374
+ Proof. We verify the lemma directly. Evaluating the right-hand side at $(W_1, W_2)$ ,
375
+
376
+ $$
377
+ \begin{array}{l} \sum_{\substack{S\subseteq N\\ F\subseteq M}} C_{S,F}(h)\, h_{S,F}(W_{1},W_{2}) = \sum_{\substack{S\subseteq W_{1}\\ F\subseteq W_{2}}}\sum_{\substack{S^{\prime}\subseteq S\\ F^{\prime}\subseteq F}}(-1)^{s + f - s^{\prime} - f^{\prime}}h(S^{\prime},F^{\prime}) \\ = \sum_{\substack{S^{\prime}\subseteq W_{1}\\ F^{\prime}\subseteq W_{2}}}\Bigl[\sum_{s = s^{\prime}}^{w_{1}}(-1)^{s - s^{\prime}}\binom{w_{1} - s^{\prime}}{s - s^{\prime}}\sum_{f = f^{\prime}}^{w_{2}}(-1)^{f - f^{\prime}}\binom{w_{2} - f^{\prime}}{f - f^{\prime}}\Bigr]h(S^{\prime},F^{\prime}) \\ = h\left(W_{1}, W_{2}\right). \end{array}
378
+ $$
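+
+ Here the bracketed alternating binomial sums vanish unless $s^{\prime} = w_{1}$ and $f^{\prime} = w_{2}$ , i.e., unless $S^{\prime} = W_{1}$ and $F^{\prime} = W_{2}$ , which proves the lemma.
+
+ Lemma A.2 can also be checked numerically on small ground sets. Below is a minimal sketch (the ground sets and the random utility are illustrative choices, not taken from the paper) that verifies the representation for every pair $(W_1, W_2)$ :
+
+ ```python
+ # Numerical sanity check of Lemma A.2 on small ground sets N and M.
+ from itertools import combinations
+ import random
+
+ def subsets(xs):
+     xs = list(xs)
+     return [frozenset(c) for r in range(len(xs) + 1) for c in combinations(xs, r)]
+
+ N, M = {0, 1, 2}, {0, 1}
+ h = {(S, F): random.random() for S in subsets(N) for F in subsets(M)}  # random utility
+
+ def C(S, F):
+     # C_{S,F}(h) = sum over S' in S, F' in F of (-1)^(s+f-s'-f') h(S', F')
+     return sum((-1) ** (len(S) + len(F) - len(Sp) - len(Fp)) * h[(Sp, Fp)]
+                for Sp in subsets(S) for Fp in subsets(F))
+
+ # h_{S,F}(W1, W2) = 1 iff S is contained in W1 and F in W2 (Eq. (13)), so
+ # the sum below equals the sum over all S, F of C_{S,F}(h) * h_{S,F}(W1, W2).
+ for W1 in subsets(N):
+     for W2 in subsets(M):
+         rhs = sum(C(S, F) for S in subsets(W1) for F in subsets(W2))
+         assert abs(h[(W1, W2)] - rhs) < 1e-9
+ print("Lemma A.2 verified for all (W1, W2)")
+ ```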
379
+
380
+ - Step 3: Combining the first two steps, by Axiom 1 (linearity),
381
+
382
+ $$
383
+ \begin{array}{l} \psi_{ij}(h) = \sum_{\substack{S\subseteq N\\ F\subseteq M}}C_{S,F}(h)\,\psi_{ij}(h_{S,F}) = \sum_{\substack{i\in S\subseteq N\\ j\in F\subseteq M}}C_{S,F}(h) / (sf). \end{array}
384
+ $$
385
+
386
+ Let $\pi_1, \pi_2$ be two permutations on $N$ and $M$ respectively, then
387
+
388
+ $$
389
+ \begin{array}{l} \psi_{\pi_{1}(i)\pi_{2}(j)}(\pi_{1}\pi_{2}h) = \sum_{\substack{\pi_{1}(i)\in S\subseteq N\\ \pi_{2}(j)\in F\subseteq M}}C_{S,F}(\pi_{1}\pi_{2}h) / (sf) \\ = \sum_{\substack{i\in \pi_{1}(S)\subseteq N\\ j\in \pi_{2}(F)\subseteq M}}C_{\pi_{1}(S),\pi_{2}(F)}(h) / (sf) = \psi_{ij}(h). \end{array}
390
+ $$
391
+
392
+ For the other direction, that Axiom 3 implies its explanation: since Axioms 1, 2, 3, and 4 all hold, the 2D-Shapley formula Eq. (22) applies. Under the hypotheses of the explanation, the numerator is the same for both $i_1j_1$ and $i_2j_2$ for every pair $(S, F)$ , hence $\psi_{i_1j_1}(h) = \psi_{i_2j_2}(h)$ .
393
+
394
+ # B. Proof of the representation theorem for 2D-Shapley
395
+
396
+ In this section, we justify the representation theorem through a sequence of lemmas. The strategy is to add the axioms one at a time and show what each contributes to 2D-Shapley. We first add the linearity and dummy axioms to obtain a weighted sum of marginal contributions.
397
+
398
+ Lemma B.1. For any value $\psi_{ij}$ satisfying the 2d-linearity and 2d-dummy axioms (Axioms 1 and 2), we have that
399
+
400
+ $$
401
+ \psi_{ij}(h) = \sum_{S \subseteq N \backslash i}\, \sum_{F \subseteq M \backslash j} p_{S,F}^{ij}\, [\, h(S \cup i, F \cup j) + h(S, F) - h(S \cup i, F) - h(S, F \cup j)\, ], \tag{14}
402
+ $$
403
+
404
+ where $\sum_{S\subseteq N\backslash i}\sum_{F\subseteq M\backslash j}p_{S,F}^{ij} = 1$.
405
+
406
+ Proof. For any $h \in G$ ,
407
+
408
+ $$
409
+ h = \sum_ {\substack {S \subseteq N \\ F \subseteq M}} h (S, F) W _ {S, F}, \tag{15}
410
+ $$
411
+
412
+ where
413
+
414
+ $$
415
+ W_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } W_{1} = S \text{ and } W_{2} = F, \\ 0, & \text{otherwise}. \end{cases}
416
+ $$
417
+
418
+ By the 2d-linearity axiom,
419
+
420
+ $$
421
+ \psi_{ij}(h) = \sum_{\substack{S\subseteq N\\ F\subseteq M}}h(S,F)\psi_{ij}(W_{S,F}).
422
+ $$
423
+
424
+ Now define another utility function $W_{S,F}^{\prime}$ :
425
+
426
+ $$
427
+ W_{S,F}^{\prime}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subseteq W_{1} \text{ and } F = W_{2}, \\ 0, & \text{otherwise}. \end{cases}
428
+ $$
429
+
430
+ For any $S \subseteq N \backslash i$ and $F \subseteq M \backslash j$ , we can check that block $(i, j)$ is a dummy for $W_{S,F}^{\prime}$ ; then by the 2d-dummy axiom, $\psi_{ij}(W_{S,F}^{\prime}) = 0$ . In particular, letting $S = N \backslash i$ and fixing any $F^{\prime} \subseteq M \backslash j$ , we have:
431
+
432
+ $$
433
+ \psi_ {i j} \left(W _ {N, F ^ {\prime}}\right) + \psi_ {i j} \left(W _ {N \backslash i, F ^ {\prime}}\right) = 0.
434
+ $$
435
+
436
+ For inductive purposes, assume it has been shown that $\psi_{ij}(W_{S,F'}) + \psi_{ij}(W_{S\cup i,F'}) = 0$ for fixed $F^{\prime}\subseteq M\backslash j$ and every $S\subseteq N\backslash i$ with $|S|\geq k\geq 2$ . (The case $k = n - 1$ has been proved.) Now take a fixed $S\subseteq N\backslash i$ with $|S| = k - 1$ ; then
437
+
438
+ $$
439
+ \begin{array}{l} 0 = \psi_{ij}\left(W_{S, F^{\prime}}^{\prime}\right) = \sum_{S \subseteq S_{1} \subseteq N} \psi_{ij}\left(W_{S_{1}, F^{\prime}}\right) \\ = \psi_{ij}(W_{S\cup i,F^{\prime}}) + \psi_{ij}(W_{S,F^{\prime}}) + \sum_{\substack{S_{1}\subseteq N\setminus i\\ S\subsetneq S_{1}}}[\psi_{ij}(W_{S_{1}\cup i,F^{\prime}}) + \psi_{ij}(W_{S_{1},F^{\prime}})] \\ = \psi_{ij}\left(W_{S \cup i, F^{\prime}}\right) + \psi_{ij}\left(W_{S, F^{\prime}}\right), \end{array}
440
+ $$
441
+
442
+ where the bracketed terms vanish by the inductive hypothesis. Therefore, $\psi_{ij}(W_{S\cup i,F'}) + \psi_{ij}(W_{S,F'}) = 0$ for all $S\subseteq N\backslash i$ and fixed $F^{\prime}\subseteq M\backslash j$ with $0 < |S|\leq n - 1$ and $0 < |F^{\prime}|\leq m - 1$ . Similarly, we obtain that $\psi_{ij}(W_{S',F}) + \psi_{ij}(W_{S',F\cup j}) = 0$ for fixed $S^{\prime}\subseteq N\backslash i$ and all $F\subseteq M\backslash j$ with $0 < |S'| \leq n - 1$ and $0 < |F| \leq m - 1$ , by defining another analogous utility function $W_{S',F}^{\prime}$ and repeating the argument above.
443
+
444
+ Using the results above,
445
+
446
+ $$
447
+ \begin{array}{l} \psi_{ij}(h) = \sum_{\substack{S\subseteq N\\ F\subseteq M}}h(S,F)\,\psi_{ij}(W_{S,F}) \\ = \sum_{F \subseteq M} \sum_{S \subseteq N \backslash i} [\, h(S \cup i, F)\, \psi_{ij}\left(W_{S \cup i, F}\right) + h(S, F)\, \psi_{ij}\left(W_{S, F}\right) ] \\ = \sum_{S \subseteq N \backslash i} \sum_{F \subseteq M} [\, h(S \cup i, F) - h(S, F)\, ]\, \psi_{ij}\left(W_{S \cup i, F}\right) \\ = \sum_{S \subseteq N \backslash i} \sum_{F \subseteq M \backslash j} \psi_{ij}\left(W_{S \cup i, F \cup j}\right) [\, h(S \cup i, F \cup j) - h(S, F \cup j)\, ] \\ \quad - \sum_{S \subseteq N \backslash i} \sum_{F \subseteq M \backslash j} \psi_{ij}\left(W_{S \cup i, F \cup j}\right) [\, h(S \cup i, F) - h(S, F)\, ] \\ = \sum_{S \subseteq N \backslash i} \sum_{F \subseteq M \backslash j} \psi_{ij}\left(W_{S \cup i, F \cup j}\right) [\, h(S \cup i, F \cup j) + h(S, F) - h(S, F \cup j) - h(S \cup i, F)\, ]. \end{array}
448
+ $$
449
+
450
+ For simplicity, denote $\psi_{ij}(W_{S\cup i,F\cup j})$ as $p_{S,F}^{ij}$ , then
451
+
452
+ $$
453
+ \psi_{ij}(h) = \sum_{S \subseteq N \backslash i}\, \sum_{F \subseteq M \backslash j} p_{S,F}^{ij} \left[ h(S \cup i, F \cup j) + h(S, F) - h(S \cup i, F) - h(S, F \cup j) \right].
454
+ $$
455
+
456
+ Consider the utility function $h_{ij}$ :
457
+
458
+ $$
459
+ h_{ij}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } i \in W_{1} \text{ and } j \in W_{2}, \\ 0, & \text{otherwise}, \end{cases}
460
+ $$
461
+
462
+ and we can check that block $(i,j)$ is a dummy for $h_{ij}$ with $\psi_{ij}(h_{ij}) = 1$ . Hence
463
+
464
+ $$
465
+ 1 = \psi_ {i j} (h _ {i j}) = \sum_ {S \subseteq N \backslash i} \sum_ {F \subseteq M \backslash j} p _ {S, F} ^ {i j}.
466
+ $$
467
+
468
+ Next, we add the 2d-symmetry axiom to Lemma B.1 and conclude that $p_{S,F}^{ij}$ depends only on the cardinalities of $S$ and $F$ , not on the identities of the blocks.
469
+
470
+ Lemma B.2. Assume Lemma B.1 holds. If $\psi_{ij}$ also satisfies the 2d-symmetry axiom, then
471
+
472
+ $$
473
+ p _ {S, F} ^ {i j} = p _ {s, f},
474
+ $$
475
+
476
+ where $p_{s,f}$ is some common value for $S \subseteq N \setminus i$ , $F \subseteq M \setminus j$ and $0 \leq |S| = s \leq n - 1$ , $0 \leq |F| = f \leq m - 1$ .
477
+
478
+ Proof. Define a utility $\hat{h}_{S,F}$ :
479
+
480
+ $$
481
+ \hat{h}_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subsetneq W_{1} \text{ and } F \subsetneq W_{2}, \\ 0, & \text{otherwise}. \end{cases}
482
+ $$
483
+
484
+ 1. For $i \in N$ and $j \in M$ , let $S_1, F_1$ and $S_2, F_2$ be any two coalitions where $S_1, S_2 \subseteq N \setminus i$ and $F_1, F_2 \subseteq M \setminus j$ with $0 < |S_1| = |S_2| < n - 1$ and $0 < |F_1| = |F_2| < m - 1$ , respectively. Consider two permutations $\pi_1$ and $\pi_2$ satisfying $\pi_1(S_1) = S_2, \pi_1(i) = i$ and $\pi_2(F_1) = F_2, \pi_2(j) = j$ . Then,
485
+
486
+ $$
487
+ p _ {S _ {1}, F _ {1}} ^ {i j} = \psi_ {i j} (\hat {h} _ {S _ {1}, F _ {1}}) = \psi_ {i j} (\hat {h} _ {S _ {2}, F _ {2}}) = p _ {S _ {2}, F _ {2}} ^ {i j},
488
+ $$
489
+
490
+ where the central equality is a consequence of the 2d-symmetry axiom.
491
+
492
+ 2. For distinct $i_1, i_2 \in N$ and $j_1, j_2 \in M$ , let $S \subseteq N \setminus \{i_1, i_2\}$ and $F \subseteq M \setminus \{j_1, j_2\}$ , and the permutations $\pi_1, \pi_2$ respectively interchange $i_1, i_2$ and $j_1, j_2$ while leaving other elements fixed. Then,
493
+
494
+ $$
495
+ \pi_ {1} \pi_ {2} \hat {h} _ {S, F} = \hat {h} _ {S, F},
496
+ $$
497
+
498
+ $$
499
+ p _ {S, F} ^ {i _ {1} j _ {1}} = \psi_ {i _ {1} j _ {1}} (\hat {h} _ {S, F}) = \psi_ {i _ {2} j _ {2}} (\hat {h} _ {S, F}) = p _ {S, F} ^ {i _ {2} j _ {2}},
500
+ $$
501
+
502
+ where the central equality is a consequence of the 2d-symmetry axiom. Combining with the previous result in Step 1, we find that for every $0 < s < n - 1$ and $0 < f < m - 1$ , there is a $p_{s,f}$ such that $p_{S,F}^{ij} = p_{s,f}$ for every $i \in N$ and $j \in M$ , $S \subseteq N \setminus i$ and $F \subseteq M \setminus j$ with $|S| = s$ , $|F| = f$ .
503
+
504
+ 3. Similarly, by using different utility functions, we can find, for all $i\in N$ and $j\in M$ ,
505
+
506
+ - a $p_{n - 1,f}$ such that $p_{N\backslash i,F}^{ij} = p_{n - 1,f}$ for $F\subseteq M\backslash j$ and $0\leq |F| = f < m - 1$ ,
507
+ - a $p_{s,m-1}$ such that $p_{S,M\backslash j}^{ij} = p_{s,m-1}$ for $S \subseteq N\backslash i$ and $0 \leq |S| = s < n - 1$ ,
508
+ - a $p_{0,f}$ such that $p_{\emptyset, F}^{ij} = p_{0,f}$ for $F \subseteq M \backslash j$ and $0 < |F| = f < m - 1$ ,
509
+ - a $p_{s,0}$ such that $p_{S,\emptyset}^{ij} = p_{s,0}$ , for $S \subseteq N \backslash i$ and $0 < |S| = s < n - 1$ ,
510
+ - a $p_{n - 1,m - 1}$ such that $p_{N\backslash i,M\backslash j}^{ij} = p_{n - 1,m - 1}$ ,
511
+ - a $p_{0,0}$ such that $p_{\emptyset, \emptyset}^{ij} = p_{0,0}$ , which makes the sum of all the weights equal to 1.
512
+
513
+ Finally, we add the 2d-efficiency axiom and obtain the uniqueness of 2D-Shapley.
514
+
515
+ Lemma B.3. Assume Lemma B.1 holds. Then $\psi_{ij}(h)$ satisfies the 2d-efficiency axiom if and only if
516
+
517
+ $$
518
+ \sum_ {\substack {i \in N \\ j \in M}} p _ {N \backslash i, M \backslash j} ^ {i j} = 1, \tag{16}
519
+ $$
520
+
521
+ $$
522
+ \sum_ {\substack {i \in S \\ j \in F}} p _ {S \backslash i, F \backslash j} ^ {i j} + \sum_ {\substack {i \notin S \\ j \notin F}} p _ {S, F} ^ {i j} - \sum_ {\substack {i \notin S \\ j \in F}} p _ {S, F \backslash j} ^ {i j} - \sum_ {\substack {i \in S \\ j \notin F}} p _ {S \backslash i, F} ^ {i j} = 0, \tag{17}
523
+ $$
524
+
525
+ where $S\subsetneq N$ or $F\subsetneq M$ .
526
+
527
+ Proof. On the one hand, by Eq. (16) and Eq. (17),
528
+
529
+ $$
530
+ \begin{array}{l} h(N,M) = \sum_{\substack{S\subseteq N\\ F\subseteq M}}h(S,F)[\sum_{\substack{i\in S\\ j\in F}}p_{S\setminus i,F\setminus j}^{ij} + \sum_{\substack{i\notin S\\ j\notin F}}p_{S,F}^{ij} - \sum_{\substack{i\notin S\\ j\in F}}p_{S,F\setminus j}^{ij} - \sum_{\substack{i\in S\\ j\notin F}}p_{S\setminus i,F}^{ij}] \\ = \sum_{\substack{i\in N\\ j\in M}}\sum_{\substack{S\subseteq N\setminus i\\ F\subseteq M\setminus j}}p_{S,F}^{ij}[h(S\cup i,F\cup j) + h(S,F) - h(S\cup i,F) - h(S,F\cup j)] \\ = \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h). \\ \end{array}
531
+ $$
532
+
533
+ On the other hand, recall:
534
+
535
+ $$
536
+ \hat{h}_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subsetneq W_{1} \text{ and } F \subsetneq W_{2}, \\ 0, & \text{otherwise}, \end{cases}
537
+ $$
538
+
539
+ and
540
+
541
+ $$
542
+ h_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subseteq W_{1} \text{ and } F \subseteq W_{2}, \\ 0, & \text{otherwise}. \end{cases}
543
+ $$
544
+
545
+ Consider two new utility functions
546
+
547
+ $$
548
+ \tilde{h}_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subsetneq W_{1} \text{ and } F \subseteq W_{2}, \\ 0, & \text{otherwise}, \end{cases}
549
+ $$
550
+
551
+ and
552
+
553
+ $$
554
+ \bar{h}_{S,F}(W_{1}, W_{2}) = \begin{cases} 1, & \text{if } S \subseteq W_{1} \text{ and } F \subsetneq W_{2}, \\ 0, & \text{otherwise}. \end{cases}
555
+ $$
556
+
557
+ Then, for any $S \subseteq N$ and $F \subseteq M$ ,
558
+
559
+ $$
560
+ \begin{array}{l} \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h_{S,F}) + \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\hat{h}_{S,F}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\tilde{h}_{S,F}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\bar{h}_{S,F}) \\ = \sum_{\substack{i\in S\\ j\in F}}p^{ij}_{S\setminus i,F\setminus j} + \sum_{\substack{i\notin S\\ j\notin F}}p^{ij}_{S,F} - \sum_{\substack{i\notin S\\ j\in F}}p^{ij}_{S,F\setminus j} - \sum_{\substack{i\in S\\ j\notin F}}p^{ij}_{S\setminus i,F}. \\ \end{array}
561
+ $$
562
+
563
+ When $S = N$ and $F = M$ ,
564
+
565
+ $$
566
+ \begin{array}{l} \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h_{N,M}) + \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\hat{h}_{N,M}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\tilde{h}_{N,M}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\bar{h}_{N,M}) \\ = h_{N,M}(N, M) + \hat{h}_{N,M}(N, M) - \tilde{h}_{N,M}(N, M) - \bar{h}_{N,M}(N, M) \\ = 1. \end{array}
567
+ $$
568
+
569
+ Otherwise,
570
+
571
+ $$
572
+ \begin{array}{l} \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(h_{S,F}) + \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\hat{h}_{S,F}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\tilde{h}_{S,F}) - \sum_{\substack{i\in N\\ j\in M}}\psi_{ij}(\bar{h}_{S,F}) \\ = h_{S,F}(N, M) + \hat{h}_{S,F}(N, M) - \tilde{h}_{S,F}(N, M) - \bar{h}_{S,F}(N, M) \\ = 0. \end{array}
573
+ $$
574
+
575
+ Hence, Eq. (16) and Eq. (17) follow immediately.
576
+
577
+ Now, let's prove Theorem 3.3.
578
+
579
+ Proof of Theorem 3.3. By Lemma B.2,
580
+
581
+ $$
582
+ \begin{array}{l} \psi_{ij}(h) = \sum_{s = 0}^{n - 1}\sum_{f = 0}^{m - 1}\sum_{\substack{S\subseteq N\setminus i\\ |S| = s}}\sum_{\substack{F\subseteq M\setminus j\\ |F| = f}}p_{s,f}[h(S\cup i,F\cup j) + h(S,F) \\ \left. - h (S \cup i, F) - h (S, F \cup j) \right]. \\ \end{array}
583
+ $$
584
+
585
+ By Lemma B.1 and Lemma B.3, we have the following equations:
586
+
587
+ $$
588
+ \begin{array}{l} \sum_ {s = 0} ^ {n - 1} \sum_ {f = 0} ^ {m - 1} \binom {n - 1} {s} \binom {m - 1} {f} p _ {s, f} = 1, \\ s f \cdot p _ {s - 1, f - 1} + (n - s) (m - f) \cdot p _ {s, f} = (n - s) f \cdot p _ {s, f - 1} \\ + s (m - f) p _ {s - 1, f}, 1 \leq s \leq n - 1, 1 \leq f \leq m - 1, \tag {18} \\ \end{array}
589
+ $$
590
+
591
+ $$
592
+ \begin{array}{l} (m - f) \cdot p _ {0, f} = f \cdot p _ {0, f - 1}, 1 \leq f \leq m - 1, \\ (n - s) \cdot p _ {s, 0} = s \cdot p _ {s - 1, 0}, 1 \leq s \leq n - 1, \\ n m \cdot p _ {n - 1, m - 1} = 1. \\ \end{array}
593
+ $$
594
+
595
+ In fact, the first equation can be omitted, so the conditions reduce to:
596
+
597
+ $$
598
+ \begin{array}{l} s f \cdot p _ {s - 1, f - 1} + (n - s) (m - f) \cdot p _ {s, f} = (n - s) f \cdot p _ {s, f - 1} \\ + s (m - f) p _ {s - 1, f}, 1 \leq s \leq n - 1, 1 \leq f \leq m - 1, \\ (m - f) \cdot p _ {0, f} = f \cdot p _ {0, f - 1}, 1 \leq f \leq m - 1, \tag {19} \\ \left(n - s\right) \cdot p _ {s, 0} = s \cdot p _ {s - 1, 0}, 1 \leq s \leq n - 1, \\ n m \cdot p _ {n - 1, m - 1} = 1. \\ \end{array}
599
+ $$
600
+
601
+ Hence, we have $n \cdot m$ variables and $(m - 1)(n - 1) + (m - 1) + (n - 1) + 1 = n \cdot m$ equations.
602
+
603
+ Eq. (19) has a solution:
604
+
605
+ $$
606
+ p _ {s, f} = \frac {s ! (n - s - 1) !}{n !} \cdot \frac {f ! (m - f - 1) !}{m !}. \tag {20}
607
+ $$
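+
+ As a quick numerical sanity check (a sketch; $n$ and $m$ below are arbitrary test sizes of ours), one can verify that the closed form in Eq. (20) satisfies the system in Eq. (19):
+
+ ```python
+ # Check that p_{s,f} from Eq. (20) solves the recurrences of Eq. (19).
+ from math import factorial, isclose
+
+ n, m = 4, 5
+ p = {(s, f): factorial(s) * factorial(n - s - 1) / factorial(n)
+            * factorial(f) * factorial(m - f - 1) / factorial(m)
+      for s in range(n) for f in range(m)}
+
+ for s in range(1, n):      # mixed recurrence
+     for f in range(1, m):
+         assert isclose(s * f * p[s - 1, f - 1] + (n - s) * (m - f) * p[s, f],
+                        (n - s) * f * p[s, f - 1] + s * (m - f) * p[s - 1, f])
+ for f in range(1, m):      # boundary recurrence in f
+     assert isclose((m - f) * p[0, f], f * p[0, f - 1])
+ for s in range(1, n):      # boundary recurrence in s
+     assert isclose((n - s) * p[s, 0], s * p[s - 1, 0])
+ assert isclose(n * m * p[n - 1, m - 1], 1.0)
+ print("Eq. (20) satisfies Eq. (19)")
+ ```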
608
+
609
+ Therefore,
610
+
611
+ $$
612
+ \begin{array}{l} \psi_{ij}(h) = \sum_{s = 0}^{n - 1}\sum_{f = 0}^{m - 1}\sum_{\substack{S\subseteq N\setminus i\\ |S| = s}}\sum_{\substack{F\subseteq M\setminus j\\ |F| = f}}\frac{s!(n - s - 1)!}{n!}\cdot \frac{f!(m - f - 1)!}{m!}[h(S\cup i,F\cup j) + h(S,F) \\ \left. - h (S \cup i, F) - h (S, F \cup j) \right] \\ = \frac{1}{nm}\sum_{s = 1}^{n}\sum_{f = 1}^{m}\sum_{\substack{S\subseteq N\setminus i\\ |S| = s - 1}}\sum_{\substack{F\subseteq M\setminus j\\ |F| = f - 1}}\frac{(s - 1)!(n - s)!}{(n - 1)!}\cdot \frac{(f - 1)!(m - f)!}{(m - 1)!}[h(S\cup i,F\cup j) + h(S,F) \\ \left. - h (S \cup i, F) - h (S, F \cup j) \right] \\ = \frac {1}{n m} \sum_ {s = 1} ^ {n} \sum_ {f = 1} ^ {m} \frac {1}{\binom {n - 1} {s - 1} \binom {m - 1} {f - 1}} \sum_ {(S, F) \in D _ {s f} ^ {i j}} [ h (S \cup i, F \cup j) + h (S, F) - h (S \cup i, F) - h (S, F \cup j) ] \\ = \frac {1}{n m} \sum_ {s = 1} ^ {n} \sum_ {f = 1} ^ {m} \Delta_ {s f}. \\ \end{array}
613
+ $$
614
+
615
+ Now we prove the solution Eq. (20) is unique.
616
+
617
+ We convert Eq. (19) to a matrix equation of the form
618
+
619
+ $$
620
+ A \mathbf {x} = \mathbf {b},
621
+ $$
622
+
623
+ where
624
+
625
+ $$
626
+ \mathbf {x} ^ {T} = \left(p _ {0, 0}, p _ {0, 1}, \dots , p _ {0, m - 1}, p _ {1, 0}, p _ {1, 1}, \dots , p _ {1, m - 1}, \dots , p _ {n - 1, 0}, \dots , p _ {n - 1, m - 1}\right) _ {1 \times n m},
627
+ $$
628
+
629
+ $$
630
+ \mathbf {b} ^ {T} = (0, 0, 0, \dots , 0, 1) _ {1 \times n m},
631
+ $$
632
+
633
+ and
634
+
635
+ $$
636
+ \boldsymbol{A} = \begin{pmatrix} \boldsymbol{A}_{1} \\ \boldsymbol{A}_{2} \end{pmatrix}_{nm \times nm}, \tag{21}
637
+ $$
638
+
639
+ where
640
+
641
+ $$
642
+ A _ {1} = \left( \begin{array}{c c c c c} A _ {(m - 1) \times m} ^ {0} & O _ {(m - 1) \times m} & \dots & \dots & O _ {(m - 1) \times m} \\ A _ {m \times m} ^ {1} & B _ {m \times m} ^ {1} & O _ {m \times m} & \dots & O _ {m \times m} \\ O _ {m \times m} & A _ {m \times m} ^ {2} & B _ {m \times m} ^ {2} & \dots & O _ {m \times m} \\ \vdots & \vdots & \ddots & \ddots & \vdots \\ O _ {m \times m} & O _ {m \times m} & \dots & A _ {m \times m} ^ {n - 1} & B _ {m \times m} ^ {n - 1} \end{array} \right) _ {(n m - 1) \times n m},
643
+ $$
644
+
645
+ $$
646
+ \boldsymbol {A} _ {2} = \left(0, 0, \dots , 0, n m\right) _ {1 \times n m}.
647
+ $$
648
+
649
+ And
650
+
651
+ $$
652
+ \boldsymbol {A} _ {(m - 1) \times m} ^ {0} = \left( \begin{array}{c c c c c c} 1 & - (m - 1) & 0 & \dots & \dots & 0 \\ 0 & 2 & - (m - 2) & 0 & \dots & 0 \\ 0 & 0 & 3 & - (m - 3) & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & 0 & \dots & m - 1 & - 1 \end{array} \right) _ {(m - 1) \times m},
653
+ $$
654
+
655
+ $$
656
+ \boldsymbol {A} _ {m \times m} ^ {j} = \left( \begin{array}{c c c c c c} j & 0 & 0 & \dots & \dots & 0 \\ j & - j \cdot (m - 1) & 0 & 0 & \dots & 0 \\ 0 & 2 j & - j \cdot (m - 2) & 0 & \dots & 0 \\ 0 & 0 & 3 j & - j \cdot (m - 3) & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots \\ 0 & 0 & 0 & \dots & j \cdot (m - 1) & - j \end{array} \right) _ {m \times m}, 1 \leq j \leq n - 1,
657
+ $$
658
+
659
+ $$
660
+ \boldsymbol {B} _ {m \times m} ^ {j} = \left( \begin{array}{c c c c c c} - (n - j) & 0 & 0 & \dots & \dots & 0 \\ - (n - j) & (n - j) \cdot (m - 1) & 0 & 0 & \dots & 0 \\ 0 & - 2 \cdot (n - j) & (n - j) \cdot (m - 2) & 0 & \dots & 0 \\ 0 & 0 & - 3 \cdot (n - j) & (n - j) \cdot (m - 3) & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \ddots & \vdots \\ \vdots & \vdots & \vdots & \ddots & \ddots & \vdots \\ 0 & 0 & 0 & \dots & - (m - 1) \cdot (n - j) & n - j \end{array} \right) _ {m \times m}, 1 \leq j \leq n - 1.
661
+ $$
662
+
663
+ For example, if $n = m = 3$ , then
664
+
665
+ $$
666
+ \boldsymbol{A} = \left( \begin{array}{ccccccccc} 1 & -2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 2 & -1 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 1 & 0 & 0 & -2 & 0 & 0 & 0 & 0 & 0 \\ 1 & -2 & 0 & -2 & 4 & 0 & 0 & 0 & 0 \\ 0 & 2 & -1 & 0 & -4 & 2 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 2 & 0 & 0 & -1 & 0 & 0 \\ 0 & 0 & 0 & 2 & -4 & 0 & -1 & 2 & 0 \\ 0 & 0 & 0 & 0 & 4 & -2 & 0 & -2 & 1 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 9 \end{array} \right)_{9 \times 9}
667
+ $$
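+
+ For this $n = m = 3$ example, the full-rank claim and the closed-form solution can be verified directly with NumPy (a sketch; the vector `a` below lists $s!(n-s-1)!/n!$ for $s = 0, 1, 2$ ):
+
+ ```python
+ # Verify that the 9x9 example matrix has full rank and that Eq. (20)
+ # solves A x = b with b = (0, ..., 0, 1).
+ import numpy as np
+
+ A = np.array([
+     [1, -2,  0,  0,  0,  0,  0,  0, 0],
+     [0,  2, -1,  0,  0,  0,  0,  0, 0],
+     [1,  0,  0, -2,  0,  0,  0,  0, 0],
+     [1, -2,  0, -2,  4,  0,  0,  0, 0],
+     [0,  2, -1,  0, -4,  2,  0,  0, 0],
+     [0,  0,  0,  2,  0,  0, -1,  0, 0],
+     [0,  0,  0,  2, -4,  0, -1,  2, 0],
+     [0,  0,  0,  0,  4, -2,  0, -2, 1],
+     [0,  0,  0,  0,  0,  0,  0,  0, 9],
+ ])
+ assert np.linalg.matrix_rank(A) == 9
+
+ a = np.array([1 / 3, 1 / 6, 1 / 3])   # s!(n-s-1)!/n! for n = 3
+ x = np.outer(a, a).ravel()            # p_{s,f} in the ordering of x above
+ b = np.zeros(9); b[-1] = 1
+ assert np.allclose(A @ x, b)
+ ```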
668
+
669
+ We convert $\mathbf{A}$ to $\hat{\mathbf{A}}$ by elementary row and column transformations,
670
+
671
+ $$
672
+ \hat {\boldsymbol {A}} = \left( \begin{array}{c c c c c c c c c} 1 & - 2 & 0 & 2 & - 4 & 0 & 1 & - 2 & 0 \\ 0 & 2 & - 1 & 0 & 4 & - 2 & 0 & 2 & - 1 \\ \hline 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 1 & - 2 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & 2 & - 1 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 & - 2 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 2 & - 1 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \end{array} \right) _ {9 \times 9}
673
+ $$
674
+
675
+ Since elementary row and column transformations preserve rank,
676
+
677
+ $$
678
+ \operatorname{Rank}(\boldsymbol{A}) = \operatorname{Rank}(\hat{\boldsymbol{A}}).
679
+ $$
680
+
681
+ Consider the equation
682
+
683
+ $$
684
+ \hat {A} \mathbf {x} = \mathbf {0},
685
+ $$
686
+
687
+ whose only solution is $\mathbf{x} = \mathbf{0}$ ; hence
688
+
689
+ $$
690
+ \operatorname{Rank}(\mathbf{A}) = \operatorname{Rank}(\hat{\mathbf{A}}) = 9.
691
+ $$
692
+
693
+ In general, we can prove that $\operatorname{Rank}(\mathbf{A}) = nm$ holds for any $n \geq 1$ and $m \geq 1$ (apply elementary column transformations to the blocks $[A_{m \times m}^j, B_{m \times m}^j]$ within $\mathbf{A}$ in the order $j = 1, 2, \ldots, n - 1$ ). Hence the solution of Eq. (19) is unique and is given by Eq. (20). One can also check that Eq. (20) satisfies Eq. (18), so the solution of Eq. (18) is unique as well.
694
+
695
+ # C. Proof of Corollary 3.4
696
+
697
+ Proof. We use the same technique as in the proof of Lemma B.3.
698
+
699
+ $$
700
+ \begin{array}{l} \psi_{i}^{1d}(h) = \sum_{j\in M}\sum_{\substack{S\subseteq N\setminus i\\ F\subseteq M\setminus j}}p_{s,f}[h(S\cup i,F\cup j) + h(S,F) - h(S\cup i,F) - h(S,F\cup j)] \\ = \sum_{\substack{S\subseteq N\setminus i\\ F\subseteq M}}h(S\cup i,F)\bigl[\sum_{j\in F}p_{s,f - 1} - \sum_{j\notin F}p_{s,f}\bigr] + h(S,F)\bigl[\sum_{j\notin F}p_{s,f} - \sum_{j\in F}p_{s,f - 1}\bigr] \\ = \sum_{\substack{S\subseteq N\setminus i\\ F\subseteq M}}\bigl(\sum_{j\in F}p_{s,f - 1} - \sum_{j\notin F}p_{s,f}\bigr)[h(S\cup i,F) - h(S,F)] \\ = \sum_{S \subseteq N \backslash i} \bigl(\sum_{j \in M} p_{s, m - 1}\bigr) [h(S \cup i, M) - h(S, M)]. \end{array}
701
+ $$
702
+
703
+ Substituting Eq. (20) into the above equation yields the conclusion. A similar argument applies to $\psi_{j}^{1d}$ .
704
+
705
+ # D. Permutation-based 2D-Shapley Formulation
706
+
707
+ To compute 2D-Shapley more efficiently, we propose the following corollary.
708
+
709
+ Corollary D.1. Eq. (8) has an equivalent form as follows:
710
+
711
+ $$
712
+ \psi_ {i j} ^ {2 d} = \frac {1}{n m} \sum_ {\substack {S \subseteq N \backslash i \\ F \subseteq M \backslash j}} \frac {\left[ h (S \cup i , F \cup j) + h (S , F) - h (S \cup i , F) - h (S , F \cup j) \right]}{\binom {n - 1} {| S |} \binom {m - 1} {| F |}}, \tag{22}
713
+ $$
714
+
715
+ or
716
+
717
+ $$
718
+ \psi_ {i j} ^ {2 d} = \frac {1}{n ! m !} \sum_ {\substack {\pi_ {1} \in \Pi (N) \\ \pi_ {2} \in \Pi (M)}} [ h \left(P _ {i} ^ {\pi_ {1}} \cup i, P _ {j} ^ {\pi_ {2}} \cup j\right) + h \left(P _ {i} ^ {\pi_ {1}}, P _ {j} ^ {\pi_ {2}}\right) - h \left(P _ {i} ^ {\pi_ {1}} \cup i, P _ {j} ^ {\pi_ {2}}\right) - h \left(P _ {i} ^ {\pi_ {1}}, P _ {j} ^ {\pi_ {2}} \cup j\right) ], \tag{23}
719
+ $$
720
+
721
+ where $\Pi(A)$ denotes the set of all permutations of $A$ , and $P_k^\pi$ the set of all elements of $A$ that precede $k \in A$ in the permutation $\pi \in \Pi(A)$ .
722
+
723
+ The formulation in Eq. (22) is a simple derivation from Eq. (8) that sums marginal contributions over all subsets. In contrast, the second formulation in Eq. (23) sums over all sample and feature permutations, and the marginal contribution of block $(i,j)$ is weighted by a coefficient that accounts for all orderings of samples before and after sample $i$ and all orderings of features before and after feature $j$ . This corollary gives a simple expression of 2D-Shapley, and the equivalent formulation allows us to design efficient algorithms for its implementation.
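+
+ For very small $n$ and $m$ , Eq. (22) can be evaluated by brute force, which is useful for testing. The sketch below (the toy additive utility is an arbitrary placeholder, not the paper's $h$ ) also illustrates the 2d-efficiency axiom, since the cell values sum to $h(N, M)$ :
+
+ ```python
+ # Brute-force evaluation of Eq. (22); exponential cost, tiny inputs only.
+ from itertools import combinations
+ from math import comb
+
+ def subsets(xs):
+     xs = list(xs)
+     return [frozenset(c) for r in range(len(xs) + 1) for c in combinations(xs, r)]
+
+ def psi_2d(h, N, M, i, j):
+     n, m = len(N), len(M)
+     total = 0.0
+     for S in subsets(N - {i}):
+         for F in subsets(M - {j}):
+             marginal = (h(S | {i}, F | {j}) + h(S, F)
+                         - h(S | {i}, F) - h(S, F | {j}))
+             total += marginal / (comb(n - 1, len(S)) * comb(m - 1, len(F)))
+     return total / (n * m)
+
+ N, M = frozenset({0, 1, 2}), frozenset({0, 1})
+ h = lambda S, F: len(S) * len(F)       # toy utility
+ values = {(i, j): psi_2d(h, N, M, i, j) for i in N for j in M}
+ assert abs(sum(values.values()) - h(N, M)) < 1e-9   # 2d-efficiency
+ ```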
724
+
725
+ # E. Algorithm Details
726
+
727
+ Here, we explain the implementation of our algorithms and explore ways to achieve efficient computation.
728
+
729
+ # E.1. Saving Computation in 2D-Shapley-MC
730
+
731
+ First, we focus on 2D-Shapley-MC. Apart from Monte Carlo sampling over both sample and feature permutations to reduce complexity, we also reduce the number of model trainings per counterfactual evaluation from four to one, based on Eq. (2). Observe that the marginal contribution equation contains four utility terms, but three of them have already been computed and can be reused. Take a pair $(i,j)$ as an example. For the marginal contribution of $(i,j)$ , we have four utility terms to compute: $h(S\cup i,F\cup j), h(S,F\cup j), h(S\cup i,F), h(S,F)$ . However, $h(S,F\cup j)$ was already computed for the pair $(i - 1,j)$ , $h(S\cup i,F)$ for the pair $(i,j - 1)$ , and $h(S,F)$ for $(i - 1,j - 1)$ . Therefore, by caching these evaluations, we reduce the total number of model trainings by $75\%$ . Caching all model evaluations for every block might overflow the memory; however, we only need to keep the utilities of the previous and current rows (columns) if we are looping horizontally downwards (vertically rightwards), which promotes efficient memory usage. Additionally, our algorithm can be parallelized: every permutation can be computed independently and combined at the last stage, which is the "while loop" in Algorithm 1. A sketch of one such pass is given below.
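+
+ The following Python sketch is our own rendering of one pass of Algorithm 1 (function and variable names are illustrative): each cell triggers exactly one fresh utility evaluation, and the other three terms are read from the cached table `u` :
+
+ ```python
+ # One Monte Carlo pass of 2D-Shapley-MC with utility caching.
+ import numpy as np
+
+ def mc_pass(h, n, m, rng):
+     """h(sample_indices, feature_indices) -> utility of the index subsets."""
+     pi_N, pi_M = rng.permutation(n), rng.permutation(m)
+     # u[a, b] = h(first a samples of pi_N, first b features of pi_M).
+     # Row/column 0 stays at the zero base utility, matching the
+     # zero-initialized utility matrix in Algorithm 1.  For clarity we keep
+     # the whole table; only the previous row is actually needed.
+     u = np.zeros((n + 1, m + 1))
+     psi = np.zeros((n, m))
+     for a in range(1, n + 1):
+         for b in range(1, m + 1):
+             u[a, b] = h(pi_N[:a], pi_M[:b])   # the only new model training
+             s, f = pi_N[a - 1], pi_M[b - 1]
+             psi[s, f] = u[a, b] + u[a - 1, b - 1] - u[a, b - 1] - u[a - 1, b]
+     return psi   # averaged over passes in the outer "while" loop
+ ```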
732
+
733
+ # E.2. Limitations of 2D-Shapley-MC and Possible Improvements
734
+
735
+ One limitation of the Monte Carlo method is its time complexity, which scales with the number of rows and columns of the data matrix. To improve the efficiency of 2D-Shapley-MC, we can reduce its model-retraining burden to lower the computation cost. For example, there exist highly efficient methods for model re-training, such as FFCV [1,2], which has been applied in Datamodels [3] and can significantly reduce computation complexity. Another limitation is that 2D-Shapley-MC relies on the performance scores of models trained on different subsets to determine the cell values. However, these scores are susceptible to noise due to training stochasticity when the learning algorithm is randomized (e.g., SGD) (Wang & Jia, 2022). To overcome these limitations, we proposed an efficient, nearest-neighbor-based method, 2D-Shapley-KNN, which involves no model training and only requires sorting data. With this method, we also avoid the problem of training stochasticity that 2D-Shapley-MC faces. Another advantage of 2D-Shapley-KNN is that it has an explicit formulation for sample values and only requires permuting over features. This method not only beats 2D-Shapley-MC by an order of magnitude in terms of computational efficiency but is also straightforward to compute and only requires CPU resources.
736
+
737
738
+
739
+ # E.3. Saving Computation in 2D-Shapley-KNN
740
+
741
+ Apart from removing the dependency on sample permutations and all model training, the computation of 2D-Shapley-KNN can be reduced further. Similar to 2D-Shapley-MC, we also cache utility terms here, as shown in Algorithm 2. For each pair $(i,j)$ , we need to compute $SV_{KNN}(i,P_j^\pi \cup j)$ and $SV_{KNN}(i,P_j^\pi)$ . However, the second term was already calculated for the feature preceding $j$ in $\pi$ . Thus, we reduce the total number of $SV_{KNN}$ evaluations by $50\%$ . A sketch of the underlying $SV_{KNN}$ subroutine is given below.
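+
+ For reference, the $SV_{KNN}$ subroutine can be instantiated with the exact unweighted KNN-Shapley recursion of Jia et al. (2019); the following is a minimal sketch of our own for a single test point, not the exact implementation used in our experiments:
+
+ ```python
+ # Exact unweighted KNN-Shapley for one test point (Jia et al., 2019).
+ import numpy as np
+
+ def sv_knn(X_train, y_train, x_test, y_test, K):
+     n = len(X_train)
+     order = np.argsort(np.linalg.norm(X_train - x_test, axis=1))  # nearest first
+     match = (y_train[order] == y_test).astype(float)
+     s = np.zeros(n)
+     s[n - 1] = match[n - 1] / n
+     for i in range(n - 2, -1, -1):     # 0-indexed position i has rank i + 1
+         s[i] = s[i + 1] + (match[i] - match[i + 1]) / K * min(K, i + 1) / (i + 1)
+     out = np.zeros(n)
+     out[order] = s                     # map back to original sample indices
+     return out
+ ```
+
+ In Algorithm 2, the feature subset $P_f^{\pi_M} \cup \{f\}$ enters through the columns of `X_train` , and averaging over the test set $T$ yields the vector $u[f]$ .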
742
+
743
+ Algorithm 1 2D-Shapley-MC Valuation Algorithm.
744
+ Input: Training Set $D$ , Learning Algorithm $\mathcal{A}$ , Test Set $T$ , Utility Function $h$ .
+ Output: Sample-Feature 2D Shapley Values $\psi^{2d}$ .
745
+ Ensure: $\forall i,j$ , $\psi_{ij}^{2d} = 0$ ; $t = 0$ .
746
+ while $\psi^{2d}$ not converged do
747
+ $\pi_N \gets$ Random Samples Permutation
748
+ $\pi_M \gets$ Random Features Permutation
749
+ $u \gets 0$ // Utility Matrix
750
+ for $i,j$ in $\mathrm{range}(\pi_N), \mathrm{range}(\pi_M)$ do
751
+ $s \gets \pi_N(i)$ , $f \gets \pi_M(j)$
+ $u[s,f] \gets h\left(P_s^{\pi_N} \cup \{s\}, P_f^{\pi_M} \cup \{f\}\right)$
+ $\psi_{sf}^{new} \gets u[s,f] + u[\pi_N(i - 1), \pi_M(j - 1)] - u[\pi_N(i), \pi_M(j - 1)] - u[\pi_N(i - 1), \pi_M(j)]$
+ $\psi_{sf}^{2d} \gets \frac{t}{t + 1} \psi_{sf}^{2d} + \frac{1}{t + 1} \psi_{sf}^{new}$
752
+ end
753
+ Set $t \gets t + 1$
754
+ end
755
+
756
+ Algorithm 2 2D-Shapley-KNN Valuation Algorithm.
757
+ Input: Training Set $D$ , Test Set $T$ , Top $K$ .
758
+ Output: Sample-Feature 2D Shapley Values $\psi^{2d}$ .
759
+ Ensure: $\forall i,j$ , $\psi_{ij}^{2d} = 0$ ; $t = 0$ .
760
+ while $\psi^{2d}$ not converged do
761
+ $\pi_M\gets$ Random Features Permutation
762
+ $u \gets 0$ // $SV_{KNN}$ values
763
+ for $j$ in $\mathrm{range}(\pi_M)$ do
764
+ $f \gets \pi_M(j)$
+ $u[f] \gets SV_{KNN}(N, P_f^{\pi_M} \cup \{f\}, T)$
+ $\psi_{sf}^{new} \gets u[f]_s - u[\pi_M(j - 1)]_s$
+ $\psi_{sf}^{2d} \gets \frac{t}{t + 1}\psi_{sf}^{2d} + \frac{1}{t + 1}\psi_{sf}^{new}$
765
+ end
766
+ Set $t\gets t + 1$
767
+ end
768
+
769
+ # E.4. Actual Runtime Complexity
770
+
771
+ Time complexity is an important aspect when evaluating the efficiency of algorithms. In our case, we measure the runtime of our methods for different numbers of cell valuations on the Census dataset until the values converge. Computing the runtime of exact 2D-Shapley poses a challenge: the number of permutations grows exponentially with the number of cells, making exact 2D-Shapley intractable. To address this, we estimate the exact 2D-Shapley runtime by measuring the runtime of a single permutation and scaling it by the total number of permutations required.
772
+
773
+ As we observe in Table 1, 2D-Shapley-KNN exhibits exceptional efficiency compared to 2D-Shapley-MC across various cell valuations on the Census dataset. At 1,000 cell valuations, 2D-Shapley-KNN is at least 25 times faster than 2D-Shapley-MC, a substantial advantage. Furthermore, as the number of cells increases to 100,000, 2D-Shapley-KNN is approximately 300 times faster than 2D-Shapley-MC. These findings clearly establish the runtime advantage of 2D-Shapley-KNN over 2D-Shapley-MC. Moreover, both 2D-Shapley-KNN and 2D-Shapley-MC outperform the exact 2D-Shapley method in terms of runtime. These results highlight the effectiveness and practicality of our approach for computing 2D-Shapley in real-world cases.
774
+
775
+ <table><tr><td>Method</td><td>1K</td><td>5K</td><td>10K</td><td>20K</td><td>50K</td><td>100K</td></tr><tr><td>2D Shapley-Exact (Theoretical)</td><td>1.5E+301s</td><td>2.0E+1505s</td><td>2.8E+3010s</td><td>5.6E+6020s</td><td>4.4E+15051s</td><td>1.4E+30103s</td></tr><tr><td>2D-Shapley-MC</td><td>280s</td><td>1,661s</td><td>3,127s</td><td>9,258s</td><td>17,786s</td><td>26,209s</td></tr><tr><td>2D-Shapley-KNN</td><td>11s</td><td>25s</td><td>37s</td><td>44s</td><td>53s</td><td>88s</td></tr></table>
+
+ Table 1: Actual runtime comparison between 2D-Shapley methods (columns give the number of cells valuated).
776
+
777
+ # F. Implementation Details & Results
778
+
779
+ # F.1. Details on Datasets and Models
780
+
781
+ For our experiments, we use the following datasets from the UCI Machine Learning Repository (Dua & Graff, 2017):
782
+
783
784
+
785
+ <table><tr><td>Dataset</td><td>Training Data</td><td>Test Data</td><td>Features</td></tr><tr><td>Census Income</td><td>32561</td><td>16281</td><td>14</td></tr><tr><td>Default of Credit Card Clients</td><td>18000</td><td>12000</td><td>24</td></tr><tr><td>Heart Failure</td><td>512</td><td>513</td><td>13</td></tr><tr><td>Breast Cancer Wisconsin (Original)</td><td>242</td><td>241</td><td>10</td></tr><tr><td>Wine Dataset</td><td>106</td><td>72</td><td>13</td></tr></table>
786
+
787
+ Table 2: Details on datasets used in experiments.
788
+
789
+ In the Breast Cancer Wisconsin dataset, we removed "ID number" from the list of features, as it is irrelevant for model training.
790
+
791
+ For the methods requiring model training (1D-Shapley, Random, and 2D-Shapley-MC), we used a decision tree classifier throughout.
792
+
793
+ Empirically, we verified that for each method the cell values converge within 500 permutations, which is the number of permutations we use when running these methods.
794
+
795
+ Because the datasets vary in size and number of features, we set a different number of cells to be removed at a time: for the larger datasets, Census Income and Credit Default, we remove ten cells at a time; for the smaller Breast Cancer dataset, we remove one cell at a time.
796
+
797
+ # F.2. Additional Results on Sanity check of cell-wise values experiment
798
+
799
+ We provide results on additional datasets, Heart Failure and Wine, to demonstrate the effectiveness of 2D-Shapley in cell-wise valuation. We additionally include the 2D LOO baseline for comparison. As we can observe in Figure 8, the performance of 2D LOO is comparable to or worse than the Random baseline. One of the main reasons is that 2D LOO only valuates a cell's contribution when all other cells are present. This means that after the sequential removal of some cells, the values obtained from 2D LOO may no longer accurately represent the importance of the remaining cells. In contrast, our method computes a cell's value by averaging its contribution over various sample and feature subset sizes, which ensures our cell values remain informative even after the sequential removal of a certain number of cells, thereby addressing the shortcomings of 2D LOO and leading to improved performance in cell-wise valuation.
800
+
801
+ ![](images/78ecdb3b8470b31be2b49d4158e32795aae2a83bfb03532dc8c37f452f4cd9e8.jpg)
802
+ Figure 8: Sanity check of cell-wise values on the Heart Failure and Wine datasets, comparing 2D-Shapley with the 2D LOO and Random baselines.
803
+
804
805
+
806
+ # F.3. Additional Details and Results on Fine-Grained Outlier Localization experiment
807
+
808
+ # F.3.1. Outlier Value Generation
809
+
810
+ Our outlier generation technique is inspired by (Du et al., 2022). Specifically, for a random cell with sample index $i$ and feature index $j$ , we generate an outlier value based on its feature $j$ : we first estimate the distribution of feature $j$ and then sample a value from a low-probability-density region (below $5\%$ in our experiment), as sketched below.
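+
+ A minimal sketch of this procedure (an illustrative reading; the Gaussian kernel density estimate and the widened proposal range are assumptions, not the exact implementation):
+
+ ```python
+ # Sample an outlier value for one feature from a low-density region.
+ import numpy as np
+ from scipy.stats import gaussian_kde
+
+ def sample_outlier(feature_values, rng, q=5):
+     kde = gaussian_kde(feature_values)
+     # density threshold: the q-th percentile of densities at observed values
+     threshold = np.percentile(kde(feature_values), q)
+     lo, hi = feature_values.min(), feature_values.max()
+     span = hi - lo
+     while True:   # rejection-sample until we land in a low-density region
+         candidate = rng.uniform(lo - span, hi + span)
+         if kde(candidate)[0] < threshold:
+             return candidate
+ ```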
811
+
812
+ # F.3.2. Heatmaps Comparison
813
+
814
+ To better understand the detection rate of outlier values, we visualize them through a heatmap. In Figure 9, we provide a 2D-Shapley heatmap of the original dataset before outlier injection and compare it with the 2D-Shapley heatmap in Figure 10 after injecting outliers. For layout reasons, we transpose the heatmaps so that rows represent features and columns represent samples.
815
+
816
+ On the breast cancer dataset, we observe that the cells with injected outliers change their values and mostly lie in the lower range of 2D-Shapley values. However, other cells are also affected by the outliers, and the overall range of values increases in both directions.
817
+
818
+ In addition, we present a heatmap with injected outliers generated by 1D-Shapley to provide insights into the detection performance of 1D-Shapley, shown in Figure 5A). As we can observe in the 1D-Shapley heatmap in Figure 11, the values of the injected outliers are scattered, which explains why the detection rate of 1D-Shapley is suboptimal.
819
+
820
+ # F.3.3. Ablation Study on the Budget of Inserted Outliers
821
+
822
+ In Figure 5A), we injected outlier values into $2\%$ of all cells. Here, we explore whether our 2D-Shapley method can still detect outliers under various amounts of injected outliers. Thus, we randomly inject outlier values into $1\%$ , $2\%$ , $5\%$ , $10\%$ , and $15\%$ of the cells of the original breast cancer dataset and plot the detection rate.
823
+
824
+ As we observe in Figure 12, the detection rate of outliers is very high within the first 200 inspected cells for every outlier injection rate. Further, we observe that the detection rate decreases slightly as more outliers are added to the dataset. This is indeed reasonable: the more outliers we inject into the dataset, the less uncommon they become.
825
+
826
+ ![](images/d14ff787e122c8da6b7e6179c789ec124fa02ea2680d1dca1a1475f6588c0b84.jpg)
827
+ Figure 9: 2D-Shapley values for benign patients in the original breast cancer dataset. The green border denotes a cell before an outlier value has been injected to that cell.
828
+
829
+ ![](images/5a98fd985c9b0af6ca37951a60d171884c0147fc48871726b7d35f1d6f52652e.jpg)
830
+ Figure 10: 2D-Shapley values for benign patients in the breast cancer dataset with randomly inserted outliers. The green border denotes a cell after an outlier value has been injected to that cell.
831
+
832
+ # F.4. Additional Details on Sub-matrix Valuation experiment
833
+
834
+ For the plots in Figure 6, we randomly split the Credit Default dataset into blocks. One such random split is pictured in Figure 13. We randomly moved the horizontal and vertical split lines and independently permuted the rows and columns to create different block splits.
835
+
836
+ # F.5. Hardware
837
+
838
+ In this work, we used an 8-core Intel Xeon E5-2620 v4 @ 2.20GHz CPU server as the hardware platform.
839
+
840
+ # F.6. Code
841
+
842
+ The code repository is available at https://github.com/ruoxi-jia-group/2dshapley.
843
+
844
+ ![](images/96a2b9d4fd8064a9264eb5f6695551812967822a3cca26d91ec92e0c82b300af.jpg)
845
+ Figure 11: 1D-Shapley values for benign patients in the breast cancer dataset with randomly inserted outliers. The green border denotes a cell after an outlier value has been injected to that cell.
846
+
847
+ ![](images/b69eb6335dbaca03de64bcc0f30ad89776be827a8cd74f1c3ee62bf7e3ebf66b.jpg)
848
+ Figure 12: 2D-Shapley detection rate of randomly inserted outliers in the breast cancer dataset over various injection rates.
849
+
850
+ ![](images/dd0f25ee93b07ac12481f86ce97b6065fa0ec58c83c85ff2c6087071ddd1fc07.jpg)
851
+ Figure 13: An example of a dataset split into blocks.
2dshapleyaframeworkforfragmenteddatavaluation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7128a2f2231165cd5de455985f66e11fcf8f4f844df610426f039ea177fd6f84
3
+ size 1243236
2dshapleyaframeworkforfragmenteddatavaluation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d044aa955afe579ee82ff09957cc95472e70773bdddf9c62485996d7fa9c96a9
3
+ size 962652
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:877afc51bfe61f0b0d39620cc21e34b10f84959a7bc9141a392a235d4f1574e1
3
+ size 152524
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa952ebd96316bb5055aaabbee7b241374dbf96d46dc81b96186780e1dcdcb60
3
+ size 184646
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/e2d04a42-5377-4c90-b24a-a1cebb6cf79b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6dd6bdea3cf8b0b49ca42a575b5c3dbae0d38b428d2c4ec295acde8d3442247f
3
+ size 584643
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/full.md ADDED
@@ -0,0 +1,826 @@
1
+ # A Category-theoretical Meta-analysis of Definitions of Disentanglement
2
+
3
+ Yivan Zhang $^{12}$ Masashi Sugiyama $^{21}$
4
+
5
+ # Abstract
6
+
7
+ Disentangling the factors of variation in data is a fundamental concept in machine learning and has been studied in various ways by different researchers, leading to a multitude of definitions. Despite the numerous empirical studies, more theoretical research is needed to fully understand the defining properties of disentanglement and how different definitions relate to each other. This paper presents a meta-analysis of existing definitions of disentanglement, using category theory as a unifying and rigorous framework. We propose that the concepts of the cartesian and monoidal products should serve as the core of disentanglement. With these core concepts, we show the similarities and crucial differences in dealing with (i) functions, (ii) equivariant maps, (iii) relations, and (iv) stochastic maps. Overall, our meta-analysis deepens our understanding of disentanglement and its various formulations and can help researchers navigate different definitions and choose the most appropriate one for their specific context.
8
+
9
+ # 1. Introduction
10
+
11
+ Disentanglement, in machine learning, refers to the ability to identify and separate the underlying factors that contribute to a particular variation in data (Bengio et al., 2013). It is a process of breaking down a complex phenomenon into simpler components. It has been suggested that disentangled representation learning is a promising way toward reliable, interpretable, and data-efficient machine learning (Locatello et al., 2019; Montero et al., 2020; Dittadi et al., 2021).
12
+
13
+ Because disentanglement is an important concept, many researchers have approached this problem from different angles, resulting in various definitions, metrics, methods,
14
+
15
+ <sup>1</sup>The University of Tokyo, Tokyo, Japan <sup>2</sup>RIKEN AIP, Tokyo, Japan. Correspondence to: Yivan Zhang <yivanzhang@ms.k.u-tokyo.ac.jp>.
16
+
17
+ Proceedings of the $40^{th}$ International Conference on Machine Learning, Honolulu, Hawaii, USA. PMLR 202, 2023. Copyright 2023 by the author(s).
18
+
19
+ and models. Some definitions are based on the intuition that: (1. modularity) a change in one factor should lead to a change in a single code; (2. compactness/completeness) a factor should be associated with only one code; and (3. explicitness/informativeness) the code should be able to predict the factor (Ridgeway & Mozer, 2018; Eastwood & Williams, 2018). Another line of research is based on group theory and representation theory (Cohen & Welling, 2014; 2015; Higgins et al., 2018), where the mapping from the data to the code is required to be equivariant to product group actions, preserving the product structure of automorphisms (a.k.a. symmetries). Meanwhile, information theory (Chen et al., 2018) and invariance (Higgins et al., 2017) also play an important role in characterizing disentanglement.
20
+
21
+ Then why do we want to conduct a meta-analysis? Because we study the theories and techniques of disentanglement, yet our definitions of it are quite entangled. Although large-scale experimental studies exist (Locatello et al., 2019), theoretical analyses and systematic comparisons are limited (Sepliarskaia et al., 2019; Carbonneau et al., 2022). Several important questions remain to be answered:
22
+
23
+ - What are the defining properties of disentanglement?
24
+ - What operations and structures are essential, and what are specific to the task?
25
+ - Given two definitions or metrics, does one imply the other in any situation?
26
+ - Are the existing algebraic and statistical approaches compatible with one another?
27
+
28
+ Things quickly become complicated without an abstract language to describe existing results.
29
+
30
+ Category theory (Borceux, 1994; Awodey, 2006; Leinster, 2014) is particularly suitable for designing and organizing a system of this level of complexity. It has found applications in many scientific fields (Baez, 2017; Bradley, 2018; Fong & Spivak, 2019), recently also in machine learning (Gavranović, 2019; de Haan et al., 2020; Shiebler et al., 2021; Dudzik & Veličković, 2022). In this work, we aim to disentangle the definitions of disentanglement from a categorical perspective.
31
+
32
+ In Section 2, we first introduce the essential concepts of the cartesian product and monoidal product, which we argue should be the core of disentanglement. Next, we look into the requirements based on examples and counterexamples through Sections 3 to 6. We use the categories of (1. Set) sets and functions to define the concepts of modularity and explicitness as the defining properties of disentanglement (Ridgeway & Mozer, 2018); (2. [S, Set]) functors and natural transformations to generalize to actions of an algebra (monoid, group, etc.) and equivariant maps (Higgins et al., 2018); (3. Rel) sets and relations as an example of a symmetric monoidal category; and (4. Stoch) measurable spaces and stochastic maps to introduce the concept of the Markov category (Fritz, 2020) and explain how we should use the copy/delete/projection operations to characterize disentanglement. A full-blown example is given in the end.
35
+
36
+ It is worth clarifying that this paper does not discuss metrics, models, methods, supervision, and learnability. Also, our contribution is not to category theory itself, as the math we used is not new. However, our work shows how category theory can transfer and integrate knowledge across disciplines and how abstract definitions can simplify a complex system (Baez, 2017). We hope our work is an initial step toward a full understanding of disentanglement.
37
+
38
+ # 2. Product: Core of Disentanglement
39
+
40
+ In this section, we briefly review two important categorical concepts — the cartesian product and monoidal product, which are the core of the disentanglement. We will omit many basic concepts such as the category, functor, natural transformation, and monad. Note that we frequently use commutative diagrams (Awodey, 2006) and string diagrams (Selinger, 2010) as graphical calculus (See Appendix A.1).
41
+
42
+ # 2.1. Cartesian Category
43
+
44
+ Let us dive into the definition of the (cartesian) product:
45
+
46
+ Definition 1 (Product). In any category $\mathbf{C}$ , a product of two objects $A$ and $B$ is an object $A \times B$ , together with two morphisms $A \xleftarrow{p_1} A \times B \xrightarrow{p_2} B$ , called projections, satisfying the universal property:
47
+
48
+ $$
49
+ \begin{array}{ccccc} & & C & & \\ & {\scriptstyle f_1} \swarrow & \big\downarrow {\scriptstyle \langle f_1, f_2 \rangle} & \searrow {\scriptstyle f_2} & \\ A & \xleftarrow{\;p_1\;} & A \times B & \xrightarrow{\;p_2\;} & B \end{array} \tag{1}
50
+ $$
51
+
52
+ Given any object $C$ and morphisms $A \xleftarrow{f_1} C \xrightarrow{f_2} B$ , there exists a unique morphism $\langle f_1, f_2 \rangle : C \to A \times B$ , called a pairing of $f_1$ and $f_2$ , such that $f_1 = p_1 \circ \langle f_1, f_2 \rangle$ and $f_2 = p_2 \circ \langle f_1, f_2 \rangle$ .
53
+
54
+ The gist is that any morphism $C \xrightarrow{f} A \times B$ to a product is merely a pair of component morphisms $A \xleftarrow{f_1} C \xrightarrow{f_2} B$ , and all such morphisms arise this way. However, note that a morphism $A \times B \to C$ from a product can depend on both components.
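+
+ A concrete rendering of this in Set, with Python functions standing in for morphisms (an illustrative sketch, not from the paper):
+
+ ```python
+ # Pairing <f1, f2> and projections p1, p2 in Set (Definition 1).
+ def pairing(f1, f2):
+     return lambda c: (f1(c), f2(c))    # <f1, f2>: C -> A x B
+
+ p1 = lambda ab: ab[0]                  # p1: A x B -> A
+ p2 = lambda ab: ab[1]                  # p2: A x B -> B
+
+ f1 = lambda c: c % 2                   # C = integers, A = {0, 1}
+ f2 = lambda c: str(c)                  # B = strings
+ f = pairing(f1, f2)
+ # The universal property: projecting the pairing recovers the components.
+ assert all(p1(f(c)) == f1(c) and p2(f(c)) == f2(c) for c in range(10))
+ ```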
55
+
56
+ We will be needing the following definitions and properties:
57
+
58
+ - The product morphism of $f: A \to C$ and $g: B \to D$ is defined as $f \times g: A \times B \to C \times D := \langle f \circ p_1, g \circ p_2 \rangle$ , which makes the product $\times: \mathbf{C} \times \mathbf{C} \to \mathbf{C}$ a bifunctor.
59
+ - The diagonal morphism of an object $A$ is defined as $\Delta_A: A \to A \times A \coloneqq \langle \mathrm{id}_A, \mathrm{id}_A \rangle$ , which "duplicates" $A$ .
60
+ - The terminal object 1, if it exists, is the unit of the product: for any object $A$ , there is a unique terminal morphism $e_A : A \to 1$ , which "deletes" $A$ , and $A \times 1 \cong A \cong 1 \times A$ .
61
+ - The product is associative up to isomorphism $\alpha_{A,B,C}:(A\times B)\times C\cong A\times (B\times C):=\langle p_1\circ p_1,p_2\times\mathrm{id}_C\rangle$ , which allows us to define products $\prod_{i = 1}^{N}A_{i} = A_{1}\times \dots \times A_{N}$ and projections $p_i:\prod_{i = 1}^N A_i\to A_i$ for $N\geq 2$ objects. We use the subscript $f_{i}\coloneqq p_{i}\circ f$ as an abbreviation.
62
+ - The product is commutative up to isomorphism $\beta_{A,B}: A \times B \cong B \times A := \langle p_2, p_1 \rangle$ .
63
+
64
+ A cartesian category is a category with all finite products, i.e., all binary products and a terminal object.
65
+
66
+ # 2.2. Monoidal Category
67
+
68
+ Having all products is sometimes too strong a condition. Besides, the product, if exists, is not always an appropriate concept for disentanglement. Therefore, sometimes we need to consider a weaker notion of the "product":
69
+
70
+ Definition 2 (Symmetric monoidal category). A symmetric monoidal category $(\mathbf{C},\otimes ,I)$ is a category $\mathbf{C}$ equipped with a monoidal product $\otimes :\mathbf{C}\times \mathbf{C}\to \mathbf{C}$ and a monoidal unit $I$ which is unital, associative, and commutative up to natural isomorphisms and subject to some coherence conditions.
71
+
72
+ The monoidal products are weaker because they do not need to satisfy the universal property, so there are no canonical projections anymore. A cartesian (monoidal) category is a symmetric monoidal category whose monoidal product is given by the cartesian product. However, many interesting monoidal categories are not cartesian.
73
+
74
+ Some symmetric monoidal categories have extra structures or properties, including
75
+
76
+ - monoidal category with diagonals $\Delta_A: A \to A \otimes A$ , which is natural in $A$ if
77
+
78
+ $$
79
+ \begin{array}{ccc} A & \xrightarrow{f} & B \\ {\scriptstyle \Delta_A} \big\downarrow & & \big\downarrow {\scriptstyle \Delta_B} \\ A \otimes A & \xrightarrow{f \otimes f} & B \otimes B \end{array} \tag{2}
80
+ $$
81
+
82
+ - semicartesian (monoidal) category, whose monoidal unit $I$ is a terminal object:
83
+
84
+ $$
85
+ \begin{array}{ccc} A & \xrightarrow{f} & B \\ & {\scriptstyle e_A} \searrow \swarrow {\scriptstyle e_B} & \\ & I & \end{array} \tag{3}
86
+ $$
87
+
88
+ - monoidal category with projections $\pi_1: A \otimes B \to A$ and $\pi_2: A \otimes B \to B$ (Franz, 2002; Leinster, 2016), and
89
+ - Markov category (Fritz, 2020, Definition 2.1).
90
+
91
+ They have the following relationship:
92
+
93
+ $$
94
+ \begin{array}{l} \text{cartesian} \subset \text{Markov} \subset \text{semicartesian} \\ \cap \quad \parallel \tag{4} \end{array}
95
+ $$
96
+
97
+ $$
98
+ \begin{array}{l} \text{diagonals} \subset \text{monoidal} \supset \text{projections} \end{array}
99
+ $$
100
+
101
+ These structures and properties will be important in the rest of this paper.
102
+
103
+ # 3. Sets and Functions
104
+
105
+ Equipped with these concepts, let us now look at the definitions of disentanglement. Set, the category of sets and functions, serves as our primary example. Set is cartesian, whose product is given by the Cartesian product of sets.
106
+
107
+ We use $[1..N]$ to denote the set of numbers from 1 to $N$ . We use $\backslash i$ as an abbreviation of $[1..N] \setminus \{i\}$ , i.e., the set of numbers from 1 to $N$ except $i$ .
108
+
109
+ # 3.1. Generating Process
110
+
111
+ First, let us consider how the data is generated from a set of factors. If all combinations of factors are equally possible (cf. Section 5), we can assume that
112
+
113
+ Assumption 1. The set of factors $Y \coloneqq \prod_{i=1}^{N} Y_i$ is a product of $N$ sets.
114
+
115
+ Then, let $X$ be the set of observations. A generating process $g: Y \to X$ is simply a morphism from a product, i.e., a function with multiple inputs. It is an "entangling process" because we do not have any structural assumptions on $X$ . However, we need some basic requirements for $g$ to ensure that the analysis is meaningful. For starters, we assume that
116
+
117
+ Assumption 2. $g:Y\to X$ is a monomorphism.
118
+
119
+ This means that if two observations are the same, their underlying factors must be the same, too. This assumption avoids the model not satisfying a disentanglement definition simply because of a wrong choice of factors.
120
+
121
+ # 3.2. Encoding Process
122
+
123
+ Next, we consider how an encoding process $f: X \to Z$ can exhibit disentanglement and what desiderata are. Following Ridgeway & Mozer (2018) and Eastwood & Williams (2018), we call $Z$ the set of codes, which should also be a product. In this work, we consider a simple case where
124
+
125
+ Assumption 3. The codes $Z$ also have $N$ components, and the code projections $p_i: Z \to Z_i$ are known a priori.
126
+
127
+ Based on Assumption 3, we present our first definition:
128
+
129
+ Disentanglement 1 (A morphism to a product). In a category $\mathbf{C}$ , a disentangled encoding process is a morphism $f: X \to Z$ to a product $Z := \prod_{i=1}^{N} Z_i$ .
130
+
131
+ This is perhaps the minimal requirement for an encoder to exhibit some level of disentanglement. It means that the encoder outputs multiple components, and we can extract each component without losing any information. Note that D. 1 does not even rely on the ground-truth factors $Y$ and a generating process $g$ .<sup>1</sup>
134
+
135
+ Let us now improve D. 1. A disentanglement requirement that many researchers agree on is modularity, such that "each code conveys information about at most one factor" (Ridgeway & Mozer, 2018). It is natural to consider the composition $m: Y \to Z := f \circ g$ of a generating process $g$ and an encoding process $f$ , which we call a code generating process (w.r.t. a given encoding $f$ ), while $g: Y \to X$ can be referred to as a data generating process. Then, modularity is a property of a code generating process:
136
+
137
+ Disentanglement 1.1. $m = \prod_{i=1}^{N}(m_{i,i}:Y_i\to Z_i)$ .
138
+
139
+ *(String diagram: $m: Y \to Z$ equals the parallel composite of $m_{1,1}: Y_1 \to Z_1$ and $m_{2,2}: Y_2 \to Z_2$ , with the remaining factor deleted.)*
142
+
143
+ "The $i$ -th code only encodes the $i$ -th factor."
144
+
145
+ Morphisms $m, m_i$ , and $m_{i,i}$ have the following relationship:
146
+
147
+ Proposition 1. $\forall i\in [1..N].m_i\coloneqq p_i\circ m = m_{i,i}\circ p_i$
148
+
149
+ $$
+ \begin{array}{ccc} Y & \xrightarrow{\ m\ } & Z \\ {\scriptstyle p_i}\big\downarrow & \overset{m_i}{\searrow} & \big\downarrow{\scriptstyle p_i} \\ Y_i & \xrightarrow{\ m_{i,i}\ } & Z_i \end{array} \tag{5}
+ $$
152
+
153
+ D. 1.1 is straightforward and intuitive, but there is one difficulty: it relies on the existence of some other morphisms. Given $m$, verifying whether $m_{i,i}$ exists is not trivial. However, if D. 1.1 holds, we can recover $m_{i,i}$ from $m$ as follows:
154
+
155
+ Proposition 2. $\forall i \in [1..N]$ and any choice of points $y_j: 1 \to Y_j$ $(j \neq i)$,
+
+ $$
+ m_{i,i} = Y_i \xrightarrow{\ \cong\ } 1 \times \dots \times Y_i \times \dots \times 1 \xrightarrow{\ y_1 \times \dots \times \mathrm{id}_{Y_i} \times \dots \times y_N\ } Y \xrightarrow{\ m\ } Z \xrightarrow{\ p_i\ } Z_i.
+ $$
160
+
161
+ In words, we can choose other factors arbitrarily, and a modular encoder should give us the same code; a minimal sketch of this check is given below. This observation leads to a more verifiable definition.
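+
+ As a concrete illustration (a minimal sketch of Proposition 2 on finite sets, ours, with hypothetical data and helper names not taken from the paper), we can test one code component by fixing the $i$-th factor, varying the others, and checking that the code stays constant:
+
+ ```haskell
+ -- Minimal finite-set sketch (ours) of the check suggested by Proposition 2.
+ import Data.List (nub)
+
+ -- N = 2 factors; m1 plays the role of p1 . m, the first code component.
+ m1Modular, m1Leaky :: (Int, Int) -> Int
+ m1Modular (y1, _)  = 2 * y1       -- ignores the second factor
+ m1Leaky   (y1, y2) = 2 * y1 + y2  -- leaks the second factor
+
+ -- Modularity of the component: for every fixed y1, every choice of the
+ -- other factor y2 must yield the same code (cf. Proposition 2).
+ isModular :: ((Int, Int) -> Int) -> [Int] -> [Int] -> Bool
+ isModular f ys1 ys2 =
+   all (\y1 -> length (nub [f (y1, y2) | y2 <- ys2]) <= 1) ys1
+
+ main :: IO ()
+ main = do
+   print (isModular m1Modular [0 .. 3] [0 .. 3])  -- True
+   print (isModular m1Leaky   [0 .. 3] [0 .. 3])  -- False
+ ```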
162
+
163
+ A good property of Set is that it is cartesian closed, i.e., it has exponential objects, given by the sets of functions. Let $\widehat{m_i}: Y_{\backslash i} \to Z_i^{Y_i}$ be the exponential transpose (currying) of $m_i: Y \to Z_i$. To check modularity, we can verify whether
164
+
165
+ Disentanglement 1.2. $\widehat{m_i}$ is a constant morphism.
166
+
167
+ Therefore, we can obtain the exponential transpose first and check whether it is constant. Even better, we can guarantee that these definitions are equivalent:
168
+
169
+ Theorem 3. D. 1.1 $\leftrightarrow$ D. 1.2.
170
+
171
+ Proof. Diagram chase.
172
+
173
+ ![](images/2147889c3b7819becabbe271e331437a888c7b09613f029e2bbfbc7cc704bf07.jpg)
174
+
175
+ ![](images/c255ed49138a74cde3b7a1eda774a1d9843df0b863d47ae51830ff4666fe0159.jpg)
176
+
177
+ Up to this point, we defined modularity in a cartesian closed category like Set. However, we point out that modularity alone is not sufficient:
178
+
179
+ Example (Constant). Let $Z$ be the terminal object $1^{N} \cong 1$ . The terminal morphism $e_{Y}: Y \to 1$ satisfies D. 1.1.
180
+
181
+ That is, an encoder sending everything to singletons is perfectly modular but also completely useless. Therefore, in addition to modularity, we should measure how useful and informative the codes are.
182
+
183
+ # 3.3. Decoding Process
184
+
185
+ This is where the concepts of explicitness (Ridgeway & Mozer, 2018) or informativeness (Eastwood & Williams, 2018) come in, meaning that "the factors can be precisely determined from the codes". It might be tempting to define explicitness as
186
+
187
+ Disentanglement 1.3. $f$ is an inverse of $g$ .
188
+
189
+ Then, the factors can be completely reconstructed from the observations. A drawback of D. 1.3 is that it requires the code set $Z$ to be the same as the factor set $Y$ , so $Y$ needs to be known during training. However, it is common that an encoder $f: X \to Z$ is trained with self-supervision or weak supervision (Shu et al., 2020; Wang et al., 2021), and the ground-truth factors $Y$ are only available during evaluation.
190
+
191
+ Therefore, we weaken the requirement and define the explicitness of a code generating process as
192
+
193
+ Disentanglement 1.4. $m$ is a split monomorphism.
194
+
195
+ ![](images/523f0b5a96d4cc2802bf9e30356c65661b4f3e342cd1076206f8ac618d25c0ab.jpg)
196
+
197
+ "The codes encode the factors faithfully."
198
+
199
+ This means that there exists a morphism $h: Z \to Y$, which we call a decoding process, such that $h \circ m = \mathrm{id}_Y$. In other words, $h$ is a retraction of $m$. To summarize, we will focus on the following morphisms from now on:
200
+
201
+ $$
202
+ Y \xrightarrow{\ g\ (\text{generating})\ } X \xrightarrow{\ f\ (\text{encoding})\ } Z \xrightarrow{\ h\ (\text{decoding})\ } Y \tag{7}
203
+ $$
204
+
205
+ Note that explicitness only indicates if the factors can be recovered. We may end up with entangled codes:
206
+
207
+ Example (Rotation). Let $Y$ be a vector space. A rotation is an invertible linear transformation and satisfies D. 1.4.
208
+
209
+ To avoid this, we may want the decoder to be modular, too. This property is related to the concepts of compactness (Ridgeway & Mozer, 2018) and completeness (Eastwood & Williams, 2018), meaning that "a factor is associated with only one code" (See also Appendix A.2). Like D. 1.1, we can require $h$ to be a product morphism:
210
+
211
+ Disentanglement 1.5. $h = \prod_{i=1}^{N}(h_{i,i}:Z_i\to Y_i)$ .
212
+
213
+ ![](images/e097a0e9011506f2d369ae3c3c9f6f0bace76a8da24ce3d90a7ded4757d2f9dc.jpg)
214
+
215
+ "The i-th code encodes the i-th factor faithfully."
216
+
217
+ If an encoder has a modular decoder, we can safely drop other codes if a downstream task only relies on a subset of factors. For example, if a task only depends on factor $Y_{i}$ , then a component encoder $f_{i}: X \to Z_{i}$ can encode sufficient information for this task.
218
+
219
+ We point out that an encoder with a modular decoder may not be modular itself:
220
+
221
+ Example (Duplicate). Let $Z$ be $Y \times Y$ . The diagonal morphism $\Delta_Y$ satisfies D. 1.5 with a retraction $p_1 \times p_2$ .
222
+
223
+ This means that a non-modular and explicit encoder may copy all the factors for each code $Z_{i} \coloneqq Y$ , and its modular decoder $h_{i,i}: Z_{i} \to Y_{i} \coloneqq p_{i}$ can simply project the code to each component, which is not what we expect.
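+
+ To see this failure mode concretely, here is a minimal sketch (ours, with hypothetical types) of the Duplicate example: the diagonal encoder is explicit and has a modular retraction, yet each code component carries all factors:
+
+ ```haskell
+ -- Minimal sketch (ours) of the Duplicate example: Z := Y x Y.
+ type Y = (Int, Bool)
+
+ encode :: Y -> (Y, Y)          -- the diagonal morphism, not modular
+ encode y = (y, y)
+
+ h11 :: Y -> Int                -- component decoder Z1 -> Y1, i.e., p1
+ h11 = fst
+
+ h22 :: Y -> Bool               -- component decoder Z2 -> Y2, i.e., p2
+ h22 = snd
+
+ decode :: (Y, Y) -> Y          -- modular decoder h = h11 x h22
+ decode (z1, z2) = (h11 z1, h22 z2)
+
+ main :: IO ()
+ main = print (decode (encode (3, True)) == (3, True))  -- True: h . m = id
+ ```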
224
+
225
+ A potential remedy to this issue is to require that the code does not contain any other information except for the target factor:
226
+
227
+ Disentanglement 1.6. $\forall i,j\in [1..N]$ with $i\neq j$: $\not\exists h_{i,j}:Z_i\to Y_j$ such that $h_{i,j}\circ m_i = p_j$.
228
+
229
+ "The i-th code does not encode the j-th factor."
230
+
231
+ However, D. 1.6 is even harder to verify than D. 1.1 because it relies on the non-existence of some morphisms. This is another difficulty in dealing with non-modular encoders.
232
+
233
+ Fortunately, we can guarantee that a modular and explicit encoder must have a modular decoder:
234
+
235
+ Theorem 4. (D. 1.1 $\wedge$ D. 1.4) $\to$ D. 1.5.
236
+
237
+ It is now clear that modularity (D. 1.1) and explicitness (D. 1.4) of an encoder should be the defining properties of disentanglement and our main focus when designing and evaluating disentangled representation learning algorithms. Waiving either of these requirements could cause problems. Our analysis supports similar arguments made by Ridgeway & Mozer (2018), Duan et al. (2020), and Carbonneau et al. (2022).
238
+
239
+ A minor issue is that a modular and explicit encoder may have a "non-explicit" decoder:
240
+
241
+ Example (Redundancy). Let $Z$ be $(Y_{1} \times Y_{1}) \times Y_{2}$ . The morphism $m = \Delta_{Y_1} \times \mathrm{id}_{Y_2}$ satisfies both D. 1.1 and D. 1.4.
242
+
243
+ It means that $Z_{1} \coloneqq Y_{1} \times Y_{1}$ contains redundant information about $Y_{1}$. All meaningful codes are of the form $((y_{1}, y_{1}), y_{2})$, while codes of the form $((y_{1}, y_{1}^{\prime}), y_{2})$ with $y_{1} \neq y_{1}^{\prime}$ are meaningless and should not be decoded. In categorical terms, $m$ is a product morphism and a split monomorphism, but not an epimorphism. If we want to traverse the code space, we can additionally require $m$ to be a (split) epimorphism.
244
+
245
+ # 4. Algebra Actions and Equivariant Maps
246
+
247
+ We can simply change the category from Set to [S, Set].
248
+
249
+ In this section, we explain the above sentence by showing three ways to extend D. 1 and how it relates to the definition based on the direct product of groups (Higgins et al., 2018).
250
+
251
+ [S, C] denotes the functor category of functors from S to C and natural transformations between these functors. We call the category S a scheme. To see how it relates to the existing algebraic formulation of disentanglement, we need the following well-known fact:
252
+
253
+ Definition 3 (Equivariance as naturality). Many algebraic structures, such as monoids and groups, can be considered as single-object categories. Then, an action of an algebra at an object $A$ is precisely a functor $F_A: \mathbf{S} \to \mathbf{C}$ from the corresponding scheme $\mathbf{S}$ to a category $\mathbf{C}$ containing $A$, and an equivariant map $f: A \to B$ between two actions $F_A$ and $F_B$ is precisely a natural transformation $\phi: F_A \Rightarrow F_B$.
254
+
255
+ An example is shown below:
256
+
257
+ ![](images/ac9c910550163dcac3f1be3ffbc00c58277ecf5f8c69d0fa727c1f4d3a26231f.jpg)
258
+
259
+ We use subscript $a_{A} \coloneqq F_{A}a$ as an abbreviation. We can see that $F_{A}$ and $F_{B}$ send the single S-object $*$ to C-objects $A$ and $B$ and send endomorphisms to endomorphisms. In this way, we can consider S as syntax and C as semantics.
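+
+ As a minimal executable sketch (ours, with an assumed monoid and carriers not taken from the paper), an action of the additive monoid of integers is a functor from the single-object scheme, and equivariance of a map is exactly the naturality square:
+
+ ```haskell
+ -- Minimal sketch (ours): two monoid actions and an equivariance check.
+ actA :: Int -> Double -> Double
+ actA n x = x + fromIntegral n       -- action on A: shift by n
+
+ actB :: Int -> Double -> Double
+ actB n y = y + 2 * fromIntegral n   -- action on B: shift by 2n
+
+ f :: Double -> Double
+ f = (2 *)                           -- candidate equivariant map A -> B
+
+ -- Naturality at each monoid element n: f . actA n == actB n . f.
+ equivariantAt :: Int -> Double -> Bool
+ equivariantAt n x = f (actA n x) == actB n (f x)
+
+ main :: IO ()
+ main = print (and [equivariantAt n x | n <- [-3 .. 3], x <- [0, 0.5, 1]])
+ ```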
260
+
261
+ Example (Regression vs. Ranking). Not all problems can be formulated using only endomorphisms, let alone groups. Some ranking problems (Liu, 2011) roughly correspond to finding order-preserving functions, which are equivariant to actions of the free monoid $\mathbb{N}$ of natural numbers. However, the usual regression problems also require the preservation $f(x_0) = 0$ of the zero point, which is a nullary operation zero: $1 \to \mathbb{N}$ (a morphism from a singleton to the set $\mathbb{N}$).
262
+
263
+ # 4.1. Product Category and Functor Product
264
+
265
+ Let us now consider the products of categories and functors. We highlight the following two important properties:
266
+
267
+ - The category of small categories $\mathbf{Cat}$ is cartesian closed, with the product and exponential object given by the product category $\mathbf{S}_1 \times \mathbf{S}_2$ and functor category $[\mathbf{S}, \mathbf{C}]$ .
268
+ - If $\mathbf{C}$ has (co)limits of a certain shape (e.g., product), then $[\mathbf{S}, \mathbf{C}]$ has pointwise (co)limits of the same shape (e.g., functor product $F_{1} \times^{\mathbf{S}} F_{2}: \mathbf{S} \xrightarrow{\langle F_{1}, F_{2} \rangle} \mathbf{C} \times \mathbf{C} \xrightarrow{\times} \mathbf{C}$).<sup>2</sup>
269
+
270
+ Knowing that if $\mathbf{C}$ has products then so does $[\mathbf{S},\mathbf{C}]$, we can now extend D. 1 straightforwardly by simply changing the category from $\mathbf{C}$ to $[\mathbf{S},\mathbf{C}]$:
271
+
272
+ Disentanglement 2 (A natural transformation to a functor product). Let $\mathbf{S}$ be a category, $\mathbf{C}$ be a category with products, and $F_{X}, F_{Z_{i}}: \mathbf{S} \to \mathbf{C}, i \in [1..N]$ be functors. A disentangled encoding process is a morphism to a product in $[\mathbf{S}, \mathbf{C}]$ , i.e., a natural transformation $\phi: F_{X} \Rightarrow F_{Z}$ to a functor product $F_{Z} := \prod_{i=1}^{N} F_{Z_{i}}$ .
273
+
274
+ In other words, the same scheme $\mathbf{S}$ has $N$ different models via $F_{Z_i}$ in $\mathbf{C}$, which are combined into a single model via the product $F_Z$. In the product group action example (Higgins et al., 2018), D. 2 means that the product group is viewed as a single-object category $\mathbf{S}$, and the product structure of automorphisms is preserved via the functor product.
275
+
276
+ Another approach is to view each group as a single-object category and the product group as a product category. Then, we can use the following definition:
277
+
278
+ Disentanglement 3 (A natural transformation between multifunctors). Let $\mathbf{S} = \prod_{i=1}^{N} \mathbf{S}_i$ be a product category, $\mathbf{C}$ be a category, and $F_X, F_Z: \mathbf{S} \to \mathbf{C}$ be multifunctors. A disentangled encoding process is a morphism in $[\mathbf{S}, \mathbf{C}]$ , i.e., a natural transformation $\phi: F_X \Rightarrow F_Z$ between multifunctors.
279
+
280
+ That is, a scheme $\mathbf{S}$ with $N$ components has a model in $\mathbf{C}$. We can see that D. 2 defines disentanglement via the product of functors (based on the product in the codomain category $\mathbf{C}$), while D. 3 uses the product of domain categories (based on the product in Cat). They have their own application scenarios, but due to space limits, we will not study D. 2 and D. 3 further in this paper.
283
+
284
+ # 4.2. Product-preserving Functors
285
+
286
+ Instead, let us consider a definition based on the product in the domain category S, which could be more flexible:
287
+
288
+ Disentanglement 4 (A natural transformation at a product). Let $\mathbf{S}$ be a category with binary products, $\mathbf{C}$ be a category, and $F_{X}, F_{Z}: \mathbf{S} \to \mathbf{C}$ be functors. A disentangled encoding process is a component of a natural transformation $\phi: F_{X} \Rightarrow F_{Z}$ at a product.
289
+
290
+ Additionally, if the codomain category $\mathbf{C}$ also has products, we can require that
291
+
292
+ Disentanglement 4.1. $F_{Z}$ is product-preserving.
293
+
294
+ In other words, $F_{Z}$ should be a cartesian (monoidal) functor, so products and projections in $\mathbf{S}$ are mapped to products and projections in $\mathbf{C}$ . An example is shown below:
295
+
296
+ ![](images/cbe100b094d9a1b80aa5963c470d5d1bdb9b5027cc89642bbe1496dc88b1eea0.jpg)
297
+
298
+ We can see that two $\mathbf{S}$-objects $\ast$ and $\star$ have a product $\ast \times \star$. $F_{Z}$ preserves products, so $(a\times b)_{Z} = a_{Z}\times b_{Z}$. A disentangled encoding process $f\coloneqq \phi_{\ast\times \star}$ is the component of a natural transformation $\phi$ at the product $\ast \times \star$. Note that $X$ is not necessarily a product, but its endomorphisms can have a product structure (Higgins et al., 2018).
299
+
300
+ Next, let us check what the counterpart of modularity is in the context of natural transformations. What we will do here is essentially the same as what we showed in Section 3.2. Again, it is natural to consider a code generating process $\mu : F_{Y} \Rightarrow F_{Z}$ in $[\mathbf{S},\mathbf{C}]$ , and we have a counterpart of Assumption 1 as follows:
301
+
302
+ Assumption 4. $F_{Y}$ is product-preserving.
303
+
304
+ Then, we can simply say that a modular encoder $\mu$ is a natural transformation between product-preserving functors. Even more, we can prove the following property:
305
+
306
+ Proposition 5. $\forall \ast, \star \in \mathbf{S}.\ \mu_{\ast \times \star} = \mu_{\ast} \times \mu_{\star}$.
307
+
308
+ The reader should compare D. 4.1, Assumption 4, and Proposition 5 with D. 1.1.
309
+
310
+ The following commutative diagram encompasses all the requirements (cf. Proposition 1):
311
+
312
+ [Commutative cube (10): the vertices are $Y$, $Z$, $Y_i$, and $Z_i$, each appearing twice (before and after acting with an endomorphism $a$); the edges are the projections $p_i$, the components of $\mu$, and the actions on each object, and every face commutes.]
315
+
316
+ The three axes correspond to (i) product, (ii) endomorphism, and (iii) natural transformation, respectively.
317
+
318
+ Up to this point, our definition includes the one proposed by Higgins et al. (2018) as a special case. The reader may have noticed that there is only a counterpart of modularity D. 1.1 but not of explicitness D. 1.4. Without such a requirement, we may encounter the same failure case:
319
+
320
+ Example (Constant). The constant functor $\Delta 1$ satisfies D. 4.1 with the natural transformation $e_{Y}: Y \to 1$.
321
+
322
+ To patch this, one way is to require that
323
+
324
+ Disentanglement 4.2. $F_{Z}$ is faithful.
325
+
326
+ This means that $F_Z$ is injective on morphisms for each pair of $\mathbf{S}$-objects. We need to rule out unfaithful models of a scheme lest we end up with uninformative representations. This requirement also tells us some basic properties the codes $Z$ should have, such as a minimal size or dimension, depending on the choice of the scheme $\mathbf{S}$.
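+
+ As a small illustration of this point (ours, not from the paper): if the scheme is the cyclic group $\mathbb{Z}/4$ and $\mathbf{C} = \mathbf{Set}$, faithfulness of $F_Z$ means an injective homomorphism into the symmetric group,
+
+ $$
+ \mathbb{Z}/4 \hookrightarrow \operatorname{Sym}(Z) \implies |Z| \geq 4,
+ $$
+
+ since a permutation of order 4 must contain a cycle whose length is a multiple of 4. An unfaithful model with a smaller $Z$ would collapse distinct transformations and hence lose information.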
327
+
328
+ On the other hand, the exact counterpart of explicitness D. 1.4 is as follows:
329
+
330
+ Disentanglement 4.3. $\mu$ is a split monomorphism.
331
+
332
+ D. 4.3 is a stronger notion when $F_{Y}$ is also faithful:
333
+
334
+ Theorem 6. D. 4.3 $\to$ D. 4.2.
335
+
336
+ As a final note, we point out that D. 4 is more flexible because it is not limited to endomorphisms:
337
+
338
+ Example (Binary operation). Let $\ast$ be an S-object (which itself can be a product) and $c: \ast \times \ast \rightarrow \ast$ an S-morphism. The following diagram describes how binary operations can exhibit disentanglement:
339
+
340
+ $$
+ \begin{array}{ccc} X \times X & \xrightarrow{\ f \times f\ =\ \phi_{\ast \times \ast}\ } & Z \times Z \\ {\scriptstyle c_X}\big\downarrow & & \big\downarrow{\scriptstyle c_Z} \\ X & \xrightarrow{\ f\ =\ \phi_{\ast}\ } & Z \end{array} \tag{11}
+ $$
343
+
344
+ Regarding $c \circ (a \times b)$ , the functoriality and naturality lead to the following requirement:
345
+
346
+ $$
347
+ f \left(c _ {X} \left(a _ {X} \left(x _ {1}\right), b _ {X} \left(x _ {2}\right)\right)\right) = c _ {Z} \left(a _ {Z} \left(f \left(x _ {1}\right)\right), b _ {Z} \left(f \left(x _ {2}\right)\right)\right).
348
+ $$
349
+
350
+ This formulation is particularly useful when dealing with multiple instances or heterogeneous inputs (Gatys et al., 2016; Liu et al., 2018). Further investigation is left for future work.
353
+
354
+ In summary, we showed that seemingly distinct approaches to disentanglement (Ridgeway & Mozer, 2018; Higgins et al., 2018) can be described by the same abstract language, and their underlying mechanisms (e.g., modularity and product-preserving action) are essentially the same. The core is the cartesian product of sets, functions, algebras, actions, objects, morphisms, categories, and functors.
355
+
356
+ # 5. Sets and Relations
357
+
358
+ The Cartesian product of sets is not cartesian in Rel.
359
+
360
+ In this section, we present an example of (non-cartesian) monoidal products using Rel, the category of sets and relations (Patterson, 2017).
361
+
362
+ We may want to work with relations instead of functions if we need to consider (i) unannotated factors, (ii) multiple observations for the same factor, or (iii) only a subset of all combinations of factors. Besides, Rel serves as a bridge between functions and probabilities, which will be discussed in the next section.
363
+
364
+ To characterize Rel, it is convenient to consider it as the Kleisli category of the powerset monad $P$ in Set:
365
+
366
+ $$
367
+ \mathbf{Rel} := \mathbf{Set}_{P}. \tag{12}
368
+ $$
369
+
370
+ The powerset monad $P$ sends a set $A$ to its powerset $PA$ and a function $f: A \to B$ to a set function $Pf: PA \to PB$. Its Kleisli category $\mathbf{Rel}$ has relations $A \rightsquigarrow B$ as the Kleisli morphisms, which are precisely set-valued functions $A \to PB$. The Kleisli composition $g \circ f$ is given by ordinary relation composition.
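+
+ As a minimal executable sketch (ours, with lists standing in for finite powersets and hypothetical example data), a Kleisli morphism for $P$ is a set-valued function, and Kleisli composition recovers relation composition:
+
+ ```haskell
+ -- Minimal sketch (ours) of Rel as the Kleisli category of the powerset monad.
+ import Data.List (nub)
+
+ type Rel a b = a -> [b]   -- a relation A ~> B as a set-valued function
+
+ -- Kleisli composition: a is related to c iff some b links them.
+ compR :: Eq c => Rel b c -> Rel a b -> Rel a c
+ compR g f a = nub [c | b <- f a, c <- g b]
+
+ -- A labeling function l : X -> Y and its inverse image, a generating
+ -- process g : Y ~> X that is a genuine relation, not a function.
+ invImage :: Eq y => (x -> y) -> [x] -> Rel y x
+ invImage l xs y = [x | x <- xs, l x == y]
+
+ main :: IO ()
+ main = print (invImage even [0 .. 9 :: Int] True)  -- [0,2,4,6,8]
+ ```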
371
+
372
+ Relations arise naturally in practice. For example, if we have a labeling process $l: X \to Y$, which is a function in Set, a data generating process $g: Y \rightsquigarrow X := l^*$ can be defined as its inverse image, which is no longer a function but a relation in Rel. Then $g$ can map a factor to multiple observations or to the empty set.
373
+
374
+ # 5.1. Monoidal Product of Relations
375
+
376
+ Next, let us examine the product structures in Rel. We point out the following three important facts about Rel:
377
+
378
+ - Rel is cartesian and cocartesian, with both the product and coproduct given by the disjoint union of sets $A \oplus B$ .
379
+ - Rel is monoidal closed, with both the monoidal product and internal hom given by the Cartesian product of sets $A \otimes B$ and the monoidal unit given by the singleton $\{*\}$ .
380
+
381
+ - Rel is pointed, with the zero object (an object that is both initial and terminal) given by the empty set $\varnothing$ .
382
+
383
+ That is, in Rel, the Cartesian product of sets is monoidal, but confusingly, not cartesian. So a relation $A \rightsquigarrow B \otimes C$ to a Cartesian product of two sets is more than just a pair of relations $A \rightsquigarrow B$ and $A \rightsquigarrow C$ . On the other hand, the monoidal product/internal hom $\otimes$ gives us an isomorphism between hom-sets:
384
+
385
+ $$
386
+ \operatorname{Hom}(A \otimes B, C) \cong \operatorname{Hom}(A, B \otimes C), \tag{13}
387
+ $$
388
+
389
+ which leads to the following example:
390
+
391
+ [Worked example (14): a concrete relation $A \otimes B \rightsquigarrow C$ with $A = \{a, b\}$, $B = \{0, 1\}$, and $C = \{x, y\}$, shown next to its transpose $A \rightsquigarrow B \otimes C$, which relates the same data.]
394
+
395
+ Rel is an example of how the cartesian product $\oplus$ is not an appropriate concept for disentanglement, while a suitable one, $\otimes$, only has a monoidal structure. The monoidal unit $\{*\}$ is different from the terminal object $\varnothing$, so Rel is not even semicartesian. Although we can still define the "duplicating" and "deleting" operations (Patterson, 2017, Section 3.3), they do not behave as nicely as the diagonal and terminal morphisms in Set because of their non-naturality.
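+
+ A small worked instance of this non-naturality (our own illustration): take a relation $r: A \rightsquigarrow B$ with $r(a) = \{x, y\}$. Then
+
+ $$
+ (\mathrm{copy}_B \circ r)(a) = \{(x,x),\ (y,y)\} \neq \{(x,x),\ (x,y),\ (y,x),\ (y,y)\} = ((r \otimes r) \circ \mathrm{copy}_A)(a),
+ $$
+
+ so the naturality square for copying fails as soon as $r$ is not single-valued.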
396
+
397
+ Then, how can we characterize disentanglement? At least, we still have a counterpart of disentanglement D. 1:
398
+
399
+ Disentanglement 5 (A morphism to a monoidal product). In a symmetric monoidal category $\mathbf{C}$ , a disentangled encoding process is a morphism $f: X \to Z$ to a monoidal product $Z := \bigotimes_{i=1}^{N} Z_i$ .
400
+
401
+ Further, we can extend the definition of modularity D. 1.1:
402
+
403
+ Disentanglement 5.1. $m = \bigotimes_{i=1}^{N}(m_{i,i}:Y_i\to Z_i)$ .
404
+
405
+ So, D. 1 and D. 1.1 are special cases of D. 5 and D. 5.1 for a cartesian category. However, without projections, D. 5.1 is more difficult to verify than D. 1.1.
406
+
407
+ Then, how can we resolve this? One way is to restrict our attention to right-unique relations, i.e., partial functions, so duplication behaves nicely (Eq. (2)), but it means that there is at most one observation for each factor. We can also focus on left-total relations, i.e., multivalued functions, so deletion behaves nicely (Eq. (3)), but we need to assume that there is at least one observation for each factor (Fritz, 2020, Example 2.6). If we want both, then we will end up with Set — a cartesian subcategory of Rel. Despite its many good properties, Set might be too restrictive if we want to incorporate uncertainty in disentanglement. Later we will see that a semicartesian category with (not necessarily natural) diagonals might be a balanced choice, which provides a rich collection of operations to characterize disentanglement.
408
+
409
+ # 5.2. Functor Category, Revisited
410
+
411
+ Before moving on to the next section, we have to ask: can we change from Rel to $[\mathbf{S}, \mathbf{Rel}]$? First, the fact that $[\mathbf{S}, \mathbf{C}]$ has a pointwise monoidal structure derived from $\mathbf{C}$ tells us that D. 2 generalizes to the functor monoidal product straightforwardly. Second, D. 4.1 is a special case of the following requirement for a cartesian category:
412
+
413
+ Disentanglement $4.1'$. $F_Z$ is a monoidal functor.
414
+
415
+ Higgins et al. (2018) mainly worked with the direct sum $\oplus$ (direct product $\times$) of vector spaces and briefly mentioned the tensor product $\otimes$. We remind the reader that the decisive difference between them is the difference between cartesian and monoidal products.
416
+
417
+ # 6. Measurable Spaces and Stochastic Maps
418
+
419
+ We can copy/delete in a Markov category like Stoch.
420
+
421
+ Besides the algebraic approach (Higgins et al., 2018), the probabilistic, statistical, and information-theoretic methods (Higgins et al., 2017; Chen et al., 2018; Kumar et al., 2018; Suter et al., 2019; Do & Tran, 2020) are perhaps the most popular tools for disentangled representation learning. In this section, we outline the essential operations required for characterizing disentanglement of stochastic maps.
422
+
423
+ The structure is similar to that of Rel: the category Stoch of measurable spaces and stochastic maps (Markov kernels) is the Kleisli category of the Giry monad $P$ in the category Meas of measurable spaces and measurable functions:
424
+
425
+ $$
426
+ \mathbf{Stoch} := \mathbf{Meas}_{P}. \tag{15}
427
+ $$
428
+
429
+ The Giry monad $P$ sends a measurable set $A$ to the set $PA$ of probability measures on $A$ and a measurable function $f: A \to B$ to its pushforward $f_{*}: PA \to PB$. The Kleisli morphisms are stochastic maps $p(B|A)$, and the Kleisli composition $p(C|A) = p(C|B) \circ p(B|A)$ is the Chapman-Kolmogorov equation (Giry, 1982).
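+
+ As a minimal executable sketch (ours, with finitely supported distributions standing in for the Giry monad and a hypothetical noisy channel), Kleisli composition is the finite Chapman-Kolmogorov sum $p(c|a) = \sum_b p(c|b)\, p(b|a)$:
+
+ ```haskell
+ -- Minimal sketch (ours): finite stochastic maps and their composition.
+ import qualified Data.Map as M
+
+ type Dist a     = [(a, Double)]   -- finitely supported distribution
+ type Kernel a b = a -> Dist b     -- stochastic map p(B|A)
+
+ compK :: Kernel b c -> Kernel a b -> Kernel a c
+ compK g f a = [(c, pb * pc) | (b, pb) <- f a, (c, pc) <- g b]
+
+ collect :: Ord a => Dist a -> Dist a   -- merge duplicate outcomes
+ collect = M.toList . M.fromListWith (+)
+
+ flip10 :: Kernel Bool Bool             -- a channel that flips w.p. 0.1
+ flip10 b = [(b, 0.9), (not b, 0.1)]
+
+ main :: IO ()
+ main = print (collect (compK flip10 flip10 True))
+ -- [(False,0.18),(True,0.82)]
+ ```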
430
+
431
+ # 6.1. Joint Distribution and Conditional Independence
432
+
433
+ Let us start by highlighting the impossibility result in Locatello et al. (2019), which is essentially about the product structures of Stoch. It can be succinctly restated in the categorical language as
434
+
435
+ Theorem 7 (Locatello et al. (2019, Theorem 1)). Stoch is not cartesian.
436
+
437
+ This theorem implies the following diagram (cf. Eq. (1)):
438
+
439
+ $$
+ Z_1 \xleftarrow{\ \pi_1\ } Z_1 \otimes Z_2 \xrightarrow{\ \pi_2\ } Z_2, \qquad \exists f \neq \mathrm{id}_{Z_1 \otimes Z_2} \ \text{such that} \ \pi_1 \circ f = \pi_1 \ \text{and} \ \pi_2 \circ f = \pi_2 \tag{16}
+ $$
442
+
443
+ It means that a joint distribution $p(Z_1, Z_2)$ is not uniquely specified by its marginals $p_1(Z_1)$ and $p_2(Z_2)$. Locatello et al. (2019) explicitly constructed a family of bijections $f: Z \to Z$ using the inverse transform sampling technique.
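+
+ A standard concrete instance (our own illustration, simpler than their construction): on $Z_1 = Z_2 = \{0, 1\}$, the independent joint and the perfectly correlated joint
+
+ $$
+ p^{\otimes}(z_1, z_2) = \tfrac{1}{4} \ \text{for all } (z_1, z_2), \qquad p^{=}(z_1, z_2) = \begin{cases} \tfrac{1}{2}, & z_1 = z_2, \\ 0, & z_1 \neq z_2, \end{cases}
+ $$
+
+ are distinct, yet both marginalize to the uniform $p_i(0) = p_i(1) = \tfrac{1}{2}$.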
446
+
447
+ Note the projection morphisms $\pi_1$ and $\pi_2$ used in Eq. (16), which are available because Stoch is a Markov category (Fritz, 2020). A Markov category, roughly speaking, is a category in which every object $A$ is equipped with a "copy" $\mathrm{copy}_A: A \to A \otimes A$ (not necessarily natural in $A$ ) and a "delete" $\mathrm{del}_A: A \to I$ (natural in $A$ ) morphism satisfying some coherence conditions. Therefore, all morphisms are deletable but only some are copyable, which allows for a sufficiently expressive category with enough operations to characterize disentanglement:
448
+
449
+ Disentanglement 6 (A Markov kernel to a joint). In a Markov category $\mathbf{C}$ , a disentangled encoding process is a Markov kernel $f: X \to Z$ to a joint $Z := \bigotimes_{i=1}^{N} Z_i$ .
450
+
451
+ We point out that the conditional independence $A \perp B \parallel C$ of a Markov kernel $p(A, B|C)$ (Fritz, 2020, Definition 12.12) can be used to derive a prerequisite for the modularity of an encoder $m: Y \to Z$ :
452
+
453
+ Disentanglement 6.1. $\forall i\in [1..N].Z_{i}\perp Z_{\backslash i}\parallel Y.$
454
+
455
+ Let $m_i: Y \to Z_i := (\mathrm{id}_{Z_i} \otimes \operatorname{del}_{Z_{\backslash i}}) \circ m$ be the marginalization of $m$ over $Z_{\backslash i}$ and $\mathrm{copy}_A^N: A \to A^{\otimes N}$ a "multiple copy" morphism. We can prove that D. 6.1 is equivalent to the following equational identity (cf. Eq. (1)):
456
+
457
+ Disentanglement 6.2. $m = \left(\bigotimes_{i=1}^{N} m_{i}\right) \circ \mathrm{copy}_{Y}^{N}$ .
458
+
459
+ [String diagram: the single box $m: Y \to Z_1 \otimes Z_2$ equals $\mathrm{copy}_Y$ followed by the parallel boxes $m_1$ and $m_2$.]
462
+
463
+ Theorem 8. D. 6.1 $\leftrightarrow$ D. 6.2.
464
+
465
+ We call an encoder satisfying D. 6.2 projectable. This is a more fine-grained condition than the total correlation used in Chen et al. (2018) because it is conditioned on the factors.
466
+
467
+ With this precondition, we can finally define the modularity of a stochastic encoder:
468
+
469
+ Disentanglement 6.3. $\forall i\in [1..N].\ \forall n_i: Y_i \to Y_{\backslash i}.\ Z_{i}\perp Y_{\backslash i}\parallel Y_{i}$.
470
+
471
+ ![](images/37fecd3b7f29ff53b5069ed2f6fc64fd1b789f1f0f4871a8b4d6127d1abd4416.jpg)
472
+
473
+ The reader may have noticed that this means that any $n_i: Y_i \to Y_{\backslash i}$ behaves like a deterministic morphism (Fritz, 2020, Definition 10.1) when composed with $m_i: Y \to Z_i$ .
474
+
475
+ Why do we need this? It is because, unlike in Rel, where $A \otimes B \rightsquigarrow C$ is the same thing as $A \rightsquigarrow B \otimes C$ (Eq. (13)), in Stoch, $\operatorname{Hom}(A, B \otimes C)$ is larger than $\operatorname{Hom}(A \otimes B, C)$. We need a "probe" in $\operatorname{Hom}(A, B)$ to characterize how $C$ depends on $B$, and $n_i : Y_i \to Y_{\backslash i}$ serves as this probe.
476
+
477
+ Based on this, we revealed a common loophole in existing statistical approaches: if we use the mutual information between factors and codes to characterize disentanglement, the distribution of factors is assumed to be fixed (Chen et al., 2018; Li et al., 2020; Tokui & Sato, 2022). However, the training and test distributions could be different (Trauble et al., 2021), and the existing definitions may be too coarse-grained and insufficient in such a situation.
478
+
479
+ # 6.2. Structured Markov Kernels
480
+
481
+ An important fact is that the category of functors to the subcategory of deterministic morphisms is again a Markov category (Fritz, 2020, Section 7), so we can deal with "structured" Markov kernels. We end our discussion with an example based on this fact, without any category theory jargon, to show what we can get from our formulation.
482
+
483
+ Example $([\mathbb{N},\mathbf{Set}_N]_{\mathrm{det}})$ . A robot processing video feed of multiple objects should be able to (i) identify objects, (ii) understand that objects continue to exist even if they are occluded (object permanence), and (iii) track the movement of hidden objects (invisible displacement). All these abilities should not be affected by the shape or color of the objects.
484
+
485
+ With category theory, we can formulate such a complex task with ease because the components are compositional. See Appendix B for a detailed explanation.
486
+
487
+ # 7. Limitations
488
+
489
+ As an initial step toward categorical characterization of disentanglement, this work only focused on the definitions. Many other important aspects of disentanglement were excluded, such as metrics, supervision, learnability, models, methods, and experimental validation.
490
+
491
+ With a clear understanding of the definitions in place, the immediate next step would be to find a systematic way to enrich a definition to a metric. We hypothesize that a good direction includes the following three steps:
492
+
493
+ - equality $\rightsquigarrow$ metric
+ - universal quantification $\rightsquigarrow$ aggregation
+ - existential quantification $\rightsquigarrow$ approximation
496
+
497
+ With a good metric, we can quantify how good a model is, even if it does not strictly satisfy a disentanglement definition. For example, from Theorem 4, we know that a modular and explicit encoder must have a modular decoder. Given some modularity and explicitness metrics, we may want to know how much the modularity and explicitness of an encoder imply the modularity of its decoder.
500
+
501
+ Other potential future directions include studying partial combinations of factors (Section 5) and unknown code projections (Assumption 3). The relation between D. 2, D. 3, and D. 4 deserves further investigation. How to formulate disentanglement in more complex learning problems, such as reinforcement learning, is also an interesting direction. While we have obtained more results for cartesian categories due to their favorable properties, further theoretical analyses of the monoidal category case would be useful.
502
+
503
+ # 8. Conclusion
504
+
505
+ In this work, we presented a meta-analysis of several definitions of disentanglement (Cohen & Welling, 2014; 2015; Ridgeway & Mozer, 2018; Eastwood & Williams, 2018; Higgins et al., 2018; Chen et al., 2018) using category theory as a unifying language. We revealed that some seemingly distinct formulations are just different manifestations of the same structures, the cartesian products and monoidal products, in different categories. We argued that the modularity (product morphism) and explicitness (split monomorphism) should be the defining properties of disentanglement and introduced tools to analyze these properties in various settings, including equivariant maps (functor category) and stochastic maps (Markov category). We also reinterpreted some existing results (Locatello et al., 2019) and provided support to some arguments based on empirical evidence (Ridgeway & Mozer, 2018; Träuble et al., 2021). We hope our findings can help researchers choose the most appropriate definition of disentanglement for their specific task and consequently discover better metrics, models, methods, and algorithms for disentangled representation learning.
506
+
507
+ # Acknowledgements
508
+
509
+ We would like to thank Tobias Fritz for answering our questions about Markov categories. We thank Wei Wang and Johannes Ackermann for their valuable feedback on the draft. We also thank the anonymous reviewers for their useful comments and constructive suggestions. Finally, we would like to express our gratitude to all contributors to nLab, MathOverflow, and StackExchange for creating a sharing community.
510
+
511
+ YZ was supported by JSPS KAKENHI Grant Number 22J12703. MS was supported by JST CREST Grant Number JPMJCR18A2.
512
+
513
+ # References
514
+
515
+ Adamek, J., Herrlich, H., and Strecker, G. E. Abstract and Concrete Categories: The Joy of Cats. John Wiley and Sons, 1990. URL http://www.tac.mta.ca/tac/reprints/articles/17/tr17abs.html.
+ Awodey, S. Category Theory. Oxford University Press, 2006. URL https://doi.org/10.1093/acprof:oso/9780198568612.001.0001.
+ Baez, J. Applied category theory 2018 | The n-Category Café, 2017. URL https://golem.ph.utexas.edu/category/2017/09/applied_category_theory_1.html.
+ Baez, J. C., Fritz, T., and Leinster, T. A characterization of entropy in terms of information loss. Entropy, 13(11):1945-1957, 2011. URL https://doi.org/10.3390/e13111945. https://arxiv.org/abs/1106.1791.
+ Bengio, Y., Courville, A., and Vincent, P. Representation learning: A review and new perspectives. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(8):1798-1828, 2013. URL https://doi.org/10.1109/TPAMI.2013.50. https://arxiv.org/abs/1206.5538.
+ Borceux, F. Handbook of Categorical Algebra: Volume 1, Basic Category Theory. Cambridge University Press, 1994. URL https://doi.org/10.1017/CBO9780511525858.
+ Bradley, T.-D. What is applied category theory? arXiv preprint arXiv:1809.05923, 2018. URL https://arxiv.org/abs/1809.05923.
+ Carbonneau, M.-A., Zaidi, J., Boilard, J., and Gagnon, G. Measuring disentanglement: A review of metrics. IEEE Transactions on Neural Networks and Learning Systems, 2022. URL https://doi.org/10.1109/TNNLS.2022.3218982. https://arxiv.org/abs/2012.09276.
+ Chen, R. T., Li, X., Grosse, R. B., and Duvenaud, D. K. Isolating sources of disentanglement in variational autoencoders. In Neural Information Processing Systems, 2018. URL https://proceedings.neurips.cc/paper/2018/hash/1ee3dfcd8a0645a25a35977997223d22-Abstract.html.
+ Cho, K. and Jacobs, B. Disintegration and Bayesian inversion via string diagrams. Mathematical Structures in Computer Science, 29(7):938-971, 2019. URL https://doi.org/10.1017/S0960129518000488. https://arxiv.org/abs/1709.00322.
+ Cohen, T. and Welling, M. Learning the irreducible representations of commutative Lie groups. In International Conference on Machine Learning, 2014. URL https://proceedings.mlr.press/v32/cohen14.html.
+ Cohen, T. and Welling, M. Transformation properties of learned visual representations. In International Conference on Learning Representations, 2015. URL http://arxiv.org/abs/1412.7659.
+ de Haan, P., Cohen, T., and Welling, M. Natural graph networks. Neural Information Processing Systems, 33:3636-3646, 2020. URL https://proceedings.neurips.cc/paper/2020/hash/2517756c5a9be6ac007fe9bb7fb92611-Abstract.html.
+ Dittadi, A., Träuble, F., Locatello, F., Wüthrich, M., Agrawal, V., Winther, O., Bauer, S., and Schölkopf, B. On the transfer of disentangled representations in realistic settings. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=8VXvj1QNrl1.
+ Do, K. and Tran, T. Theory and evaluation metrics for learning disentangled representations. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=HJgKoh4Ywr.
+ Duan, S., Matthey, L., Saraiva, A., Watters, N., Burgess, C., Lerchner, A., and Higgins, I. Unsupervised model selection for variational disentangled representation learning. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SyxL2TNtvr.
+ Dudzik, A. J. and Veličković, P. Graph neural networks are dynamic programmers. In Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=wu1Za9dY1GY.
+ Eastwood, C. and Williams, C. K. A framework for the quantitative evaluation of disentangled representations. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=By-7dz-AZ.
+ Fong, B. and Spivak, D. I. An Invitation to Applied Category Theory: Seven Sketches in Compositionality. Cambridge University Press, 2019. URL https://doi.org/10.1017/9781108668804. https://arxiv.org/abs/1803.05316.
+ Franz, U. What is stochastic independence? In Non-commutativity, Infinite-dimensionality and Probability at the Crossroads, pp. 254-274. World Scientific, 2002. URL https://doi.org/10.1142/9789812705242_0008. https://arxiv.org/abs/math/0206017.
+ Fritz, T. A synthetic approach to Markov kernels, conditional independence and theorems on sufficient statistics. Advances in Mathematics, 370:107239, 2020. URL https://doi.org/10.1016/j.aim.2020.107239. https://arxiv.org/abs/1908.07021.
+ Gatys, L. A., Ecker, A. S., and Bethge, M. Image style transfer using convolutional neural networks. In Computer Vision and Pattern Recognition, 2016. URL https://doi.org/10.1109/CVPR.2016.265.
+ Gavranovic, B. Compositional deep learning. Master's thesis, University of Zagreb, 2019. URL https://arxiv.org/abs/1907.08292.
+ Giry, M. A categorical approach to probability theory. Categorical Aspects of Topology and Analysis, pp. 68-85, 1982. URL https://doi.org/10.1007/BFb0092872.
+ Higgins, I., Matthey, L., Pal, A., Burgess, C., Glorot, X., Botvinick, M., Mohamed, S., and Lerchner, A. beta-VAE: Learning basic visual concepts with a constrained variational framework. In International Conference on Learning Representations, 2017. URL https://openreview.net/forum?id=Sy2fzU9gl.
+ Higgins, I., Amos, D., Pfau, D., Racanière, S., Matthey, L., Rezende, D., and Lerchner, A. Towards a definition of disentangled representations. arXiv preprint arXiv:1812.02230, 2018. URL https://arxiv.org/abs/1812.02230.
+ Kumar, A., Sattigeri, P., and Balakrishnan, A. Variational inference of disentangled latent concepts from unlabeled observations. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=H1kG7GZAW.
+ Leinster, T. Basic Category Theory, volume 143. Cambridge University Press, 2014. URL https://doi.org/10.1017/CBO9781107360068. https://arxiv.org/abs/1612.09375.
+ Leinster, T. Monoidal categories with projections | The n-Category Café, 2016. URL https://golem.ph.utexas.edu/category/2016/08/monoidal_categories_with_proje.html.
+ Li, Z., Murkute, J. V., Gyawali, P. K., and Wang, L. Progressive learning and disentanglement of hierarchical representations. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=SJxpsxrYPS.
+ Liu, T.-Y. Learning to Rank for Information Retrieval. Springer, 2011. URL https://doi.org/10.1007/978-3-642-14267-3.
+ Liu, X., Van De Weijer, J., and Bagdanov, A. D. Leveraging unlabeled data for crowd counting by learning to rank. In Computer Vision and Pattern Recognition, 2018. URL https://doi.org/10.1109/CVPR.2018.00799.
+ Locatello, F., Bauer, S., Lucic, M., Raetsch, G., Gelly, S., Schölkopf, B., and Bachem, O. Challenging common assumptions in the unsupervised learning of disentangled representations. In International Conference on Machine Learning, 2019. URL https://proceedings.mlr.press/v97/locatello19a.html.
+ Montero, M. L., Ludwig, C. J., Costa, R. P., Malhotra, G., and Bowers, J. The role of disentanglement in generalisation. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=qbH974jKUVy.
+ Patterson, E. Knowledge representation in bicategories of relations. arXiv preprint arXiv:1706.00526, 2017. URL https://arxiv.org/abs/1706.00526.
+ Piedeleu, R. and Zanasi, F. An introduction to string diagrams for computer scientists. arXiv preprint arXiv:2305.08768, 2023. URL https://arxiv.org/abs/2305.08768.
+ Ridgeway, K. and Mozer, M. C. Learning deep disentangled embeddings with the F-statistic loss. In Neural Information Processing Systems, 2018. URL https://proceedings.neurips.cc/paper/2018/hash/2b24d495052a8ce66358eb576b8912c8-Abstract.html.
+ Selinger, P. A survey of graphical languages for monoidal categories. In New Structures for Physics, pp. 289-355. Springer, 2010. URL https://doi.org/10.1007/978-3-642-12821-9_4. https://arxiv.org/abs/0908.3347.
+ Sepliarskaia, A., Kiseleva, J., and de Rijke, M. Evaluating disentangled representations. arXiv preprint arXiv:1910.05587, 2019. URL https://arxiv.org/abs/1910.05587.
+ Shiebler, D., Gavranovic, B., and Wilson, P. Category theory in machine learning. arXiv preprint arXiv:2106.07032, 2021. URL https://arxiv.org/abs/2106.07032.
+ Shu, R., Chen, Y., Kumar, A., Ermon, S., and Poole, B. Weakly supervised disentanglement with guarantees. In International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=HJgSwyBKvr.
+ Suter, R., Miladinovic, D., Schölkopf, B., and Bauer, S. Robustly disentangled causal mechanisms: Validating deep representations for interventional robustness. In International Conference on Machine Learning, 2019. URL http://proceedings.mlr.press/v97/suter19a.html.
+ Tokui, S. and Sato, I. Disentanglement analysis with partial information decomposition. In International Conference on Learning Representations, 2022. URL https://openreview.net/forum?id=pETy-HVvGtt.
+ Träuble, F., Creager, E., Kilbertus, N., Locatello, F., Dittadi, A., Goyal, A., Schölkopf, B., and Bauer, S. On disentangled representations learned from correlated data. In International Conference on Machine Learning, 2021. URL https://proceedings.mlr.press/v139/trauble21a.html.
+ Wang, T., Yue, Z., Huang, J., Sun, Q., and Zhang, H. Self-supervised learning disentangled group representation as feature. In Neural Information Processing Systems, 2021. URL https://openreview.net/forum?id=RQfckT1M_4.
566
+
567
+ # A. Additional Remarks
568
+
569
+ # A.1. Diagram
570
+
571
+ We frequently use commutative diagrams (Awodey, 2006) and string diagrams (Selinger, 2010; Piedeleu & Zanasi, 2023) as graphical calculus.
572
+
573
+ In a commutative diagram for a category, nodes are objects, arrows are morphisms, and paths are compositions of morphisms. Commutativity means that any two parallel paths denote equal morphisms, i.e., any two paths starting from $A$ and ending at $B$ compose to the same morphism.
574
+
575
+ In a string diagram for a symmetric monoidal category, rectangles are morphisms (read from bottom to top), and lines without rectangles are identity morphisms. Juxtaposition of two morphisms denotes the monoidal product, and a crossing of lines denotes the braiding. Domains and codomains of morphisms are often omitted.
576
+
577
+ # A.2. Compactness
578
+
579
+ Note that there could be two interpretations of compactness.
580
+
581
+ A non-compact encoder can mean:
582
+
583
+ (a) We can recover $Y_{j}$ from $Z_{i}$ ; or
584
+ (b) $Z_{i}$ itself is a product $Z_{i1} \times Z_{i2}$ .
585
+
586
+ We argue that (a) is what we want to avoid, while (b) is more or less harmless. For example, we can decompose $\mathbb{R}^{10}$ into $\mathbb{R}^2\times \mathbb{R}^3\times \mathbb{R}^5$ , where each component again can be decomposed into smaller parts. However, sometimes this is beneficial: while embedding a cycle into a vector space, $\mathbb{R}^2$ may be a better choice than $\mathbb{R}$ because the embedding can be continuous. In this work, we do not pay much attention to whether each code $Z_{i}$ is "minimal".
587
+
588
+ # A.3. Functorial Semantics
589
+
590
+ Can we formulate modularity in terms of functors and natural transformations? The answer is yes, because the product, as a limit, can be defined via functors in the first place. Here, we only give an alternative definition of D. 1.1:
591
+
592
+ Disentanglement $1.1'$ . $m$ is a natural transformation between functors from a discrete category.
593
+
594
+ $$
+ \begin{array}{l} Y_1 \xrightarrow{\ m_{1,1}\ } Z_1 \\ Y_2 \xrightarrow{\ m_{2,2}\ } Z_2 \end{array} \tag{17}
+ $$
597
+
598
+ It shows that a modular encoder $m$ is merely a collection of component morphisms $m_{i,i}: Y_i \to Z_i$ . Nothing more, nothing less.
599
+
600
+ # A.4. Commutativity and Irreducibility
601
+
602
+ Cohen & Welling (2014) in their seminal paper used the irreducible representations of compact commutative Lie groups to define and study disentangled representations, while Higgins et al. (2018) used the direct product of groups. Here, we briefly remark on the product, commutativity, and irreducibility.
603
+
604
+ First, let us keep things simple and consider only unital magmas: sets with a unital binary operation. If we have two unital magmas $(M, \circ_{M}, e_{M})$ and $(N, \circ_{N}, e_{N})$, we can define their product, denoted by $P = M \times N$, as the Cartesian product of their underlying sets equipped with a binary operation $\circ_{P}: (M \times N) \times (M \times N) \to (M \times N)$ given by
605
+
606
+ $$
607
+ \left(m _ {1}, n _ {1}\right) \circ_ {P} \left(m _ {2}, n _ {2}\right) := \left(m _ {1} \circ_ {M} m _ {2}, n _ {1} \circ_ {N} n _ {2}\right), \tag {18}
608
+ $$
609
+
610
+ whose unit is $e_P \coloneqq (e_M, e_N)$ . We can see that the product is also a unital magma.
611
+
612
+ Then, we can find that every element $(m,n)$ in the product $P$ can be decomposed in two ways:
613
+
614
+ $$
615
+ \begin{array}{l} (m, n) \\ = \left(m \circ_ {M} e _ {M}, e _ {N} \circ_ {N} n\right) = \left(m, e _ {N}\right) \circ_ {P} \left(e _ {M}, n\right) \tag {19} \\ = \left(e _ {M} \circ_ {M} m, n \circ_ {N} e _ {N}\right) = \left(e _ {M}, n\right) \circ_ {P} \left(m, e _ {N}\right), \\ \end{array}
616
+ $$
617
+
618
+ which can be expressed in string diagrams:
619
+
620
+ [String diagrams (20): the boxes $m$ and $n$ placed side by side (in parallel) equal $m$ and $n$ composed in series, in either order.]
623
+
624
+ We can identify $(m, e_N)$ with $m$ and $(e_M, n)$ with $n$ because of the unital magma isomorphisms:
625
+
626
+ $$
627
+ (M, \circ_ {M}, e _ {M}) \cong (M \times \{e _ {N} \}, \circ_ {P} | _ {M \times \{e _ {N} \}}, e _ {P}), \tag {21}
628
+ $$
629
+
630
+ $$
631
+ (N, \circ_{N}, e_{N}) \cong (\{e_{M}\} \times N, \circ_{P}|_{\{e_{M}\} \times N}, e_{P}). \tag{22}
632
+ $$
633
+
634
+ From this perspective, as long as we can define a serial combination $\circ$ and its unit $e$ for each component, the product operation $\times$ allows us to combine elements from different components in parallel commutatively. We can deal with one component at a time, and the order of the components does not matter. However, note that the serial combination within a component may not be commutative, such as the 3D rotations SO(3) (Cohen & Welling, 2015; Higgins et al., 2018).
635
+
636
+ This property may inspire us to "discover" disentangled components from observational data using commutativity: we can find components such that elements from the same component are closed under composition, and elements from different components are commutative.
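+
+ A minimal executable sketch of this commutativity criterion (ours, using an assumed product of the free monoid on letters and the additive integers):
+
+ ```haskell
+ -- Minimal sketch (ours): cross-component commutativity in a product magma.
+ type P = (String, Int)   -- free monoid on letters x additive integers
+
+ opP :: P -> P -> P
+ opP (m1, n1) (m2, n2) = (m1 ++ m2, n1 + n2)  -- componentwise operation
+
+ embed1 :: String -> P
+ embed1 m = (m, 0)        -- identify M with M x {e_N}
+
+ embed2 :: Int -> P
+ embed2 n = ("", n)       -- identify N with {e_M} x N
+
+ main :: IO ()
+ main = do
+   -- elements from different components commute:
+   print (opP (embed1 "ab") (embed2 3) == opP (embed2 3) (embed1 "ab"))        -- True
+   -- within one component, the operation need not commute:
+   print (opP (embed1 "ab") (embed1 "cd") == opP (embed1 "cd") (embed1 "ab"))  -- False
+ ```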
637
+
638
+ Such a decomposition may not be unique. For example, consider $\mathbb{R}^2$ with addition $+$ (as a unital magma, a monoid, or a group). $A = \{(a,0)\mid a\in \mathbb{R}\}$ , $B = \{(0,b)\mid b\in \mathbb{R}\}$ , and $C = \{(c,c)\mid c\in \mathbb{R}\}$ are all subalgebras of $\mathbb{R}^2$ , and both $A\times B$ and $A\times C$ are isomorphic to $\mathbb{R}^2$ via the addition.
639
+
640
+ Learning the (potentially product) algebraic structure from data and determining an appropriate decomposition based on commutativity is an interesting research direction.
641
+
642
+ Besides, it is worth noting that Cohen & Welling (2014) identified a connection between irreducible representations and disentanglement, which is not covered in this work. Furthermore, Cohen & Welling (2015) made an insightful observation that irreducibility is also linked to the statistical dependency structure of the representation. Using tools such as functor categories and Markov categories, we may obtain more fruitful results on the connection between algebraic and statistical properties of disentanglement.
643
+
644
+ # A.5. Probability
645
+
646
+ Note that Meas has finite products: $(A, \Sigma_A) \times (B, \Sigma_B) \coloneqq (A \times B, \Sigma_A \times \Sigma_B)$, where $\Sigma_A \times \Sigma_B$ is the coarsest $\sigma$-algebra such that the two projections are measurable.
647
+
648
+ A useful construction is the category of probability measures and measure-preserving functions, which can be defined based on the concept of the comma category:
649
+
650
+ $$
+ \mathbf{Prob} := (1 \hookrightarrow \mathbf{Stoch}) \downarrow (\mathbf{Meas} \to \mathbf{Stoch}). \tag{23}
+ $$
651
+
652
+ Concretely, $1 \hookrightarrow$ Stoch is the inclusion functor, and the functor $\mathbf{Meas} \to \mathbf{Stoch}$ sends a measurable set $A$ to itself and a measurable function $f$ to its pushforward $f_{*}$ .
653
+
654
+ Prob is a category whose objects are (isomorphic to) probability measures $(A,1\stackrel {p_A}{\longrightarrow}PA)$ , and morphisms $p_A\to p_B$ are measure-preserving functions $f:A\rightarrow B$ such that $p_B = f_*p_A$ . This category will be important when characterizing the metrics based on entropy and mutual information (Baez et al., 2011).
655
+
656
+ Meas, Stoch, and Prob can be illustrated as follows:
657
+
658
+ ![](images/bb17e0b7ad051af46a41bf2fe6674fc695bfee25926b513cb60e2b00b82f00b6.jpg)
659
+
660
+ All arrows are morphisms in Meas; red arrows are objects in Prob; yellow arrows are morphisms in Prob; green squiggly arrows represent morphisms in Stoch, which are the same as red or blue arrows.
661
+
662
+ The commutativity of red and yellow arrows indicates the composition of measure-preserving functions in Prob; the commutativity of blue and black arrows indicates the Kleisli composition of stochastic maps in Stoch.
665
+
666
+ As a side note, we can also use this construction to define the category of relations and relation-preserving functions (Adamek et al., 1990, Section 3.3):
667
+
668
+ $$
669
+ (\mathbf{1} \hookrightarrow \mathbf{Rel}) \downarrow (\mathbf{Set} \to \mathbf{Rel}). \tag{25}
670
+ $$
671
+
672
+ # A.6. Markov Category
673
+
674
+ A Markov category (Fritz, 2020) is a symmetric monoidal category in which every object $A$ is equipped with a commutative comonoid structure given by a comultiplication $\mathrm{copy}_A: A \to A \otimes A$ and a counit $\mathrm{del}_A: A \to I$ , depicted in string diagrams as
675
+
676
+ [String diagrams (26): $\mathrm{copy}_A$ is drawn as one wire splitting into two at a node, and $\mathrm{del}_A$ as a wire terminating in a node.]
679
+
680
+ and satisfying some compatibility conditions.
681
+
682
+ # A.7. Conditional Independence
683
+
684
+ Definition 4 (Conditional independence (Fritz, 2020, Definition 12.16)). A morphism $f: A \to X \otimes W \otimes Y$ displays the conditional independence $X \perp Y \mid W \parallel A$ if there are morphisms $g: A \to W$ , $h: A \otimes W \to X$ and $k: W \otimes A \to Y$ such that
685
+
686
+ ![](images/38f756caf59625e70e1a64b5c1ebd65ebb67b5f8fe15b0446815f3cc7aadd943.jpg)
687
+
688
+ Two special cases: when $A = I$ we have $X \perp Y \mid W$, and when $W = I$ we have $X \perp Y \parallel A$.
689
+
690
+ Another way to define the modularity of a stochastic encoder is as follows, which relies on the existence of some other morphisms (cf. D. 1.2):
691
+
692
+ Disentanglement 6.4. $\forall i\in [1..N].m_{i} = m_{i,i}\otimes \mathrm{del}_{Y_{\backslash i}}$
693
+
694
+ ![](images/ab73ae4460c47f17c46305fae7ede01c06060c226251d975b4fc6c7001fad392.jpg)
695
+
696
+ This condition was also studied in Cho & Jacobs (2019, Proposition 6.9). We can see that it is stronger than D. 6.3:
697
+
698
+ Theorem 9. D. 6.4 $\to$ D. 6.3.
699
+
700
+ However, it is not yet clear if they are equivalent.
701
+
702
+ # B. Example
703
+
704
+ Let us start from the category Set. Consider the nonempty multiset monad $N$ in Set, which sends a set $A$ to $\mathbb{N}^{A} \setminus \{\varnothing\}$, the set of nonempty multisets over $A$ (written below as sets of element-count pairs with positive counts). For example, the set $\{a,b\}$ is sent to
705
+
706
+ $$
707
+ \{\{(a, 1) \}, \{(a, 2) \}, \dots , \{(b, 1) \}, \dots , \{(a, 1), (b, 1) \}, \dots \}
708
+ $$
709
+
710
+ The Kleisli category $\mathbf{Set}_N$ of this monad consists of sets and multiset functions. A multiset function $f: A \rightsquigarrow B$ outputs how many ways to get a target $b \in B$ from a source $a \in A$ . The composition of multiset functions is defined by the multiplication and sum of natural numbers. This category is a Markov category.
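+
+ A minimal executable sketch (ours, with finite supports and hypothetical steps): a multiset function records how many ways each target can be reached, and composition multiplies counts along paths and sums over intermediate elements:
+
+ ```haskell
+ -- Minimal sketch (ours) of Kleisli composition for the multiset monad.
+ import qualified Data.Map as M
+
+ type Mult a b = a -> [(b, Int)]   -- counts of ways to reach each target
+
+ compM :: Ord c => Mult b c -> Mult a b -> Mult a c
+ compM g f a =
+   M.toList (M.fromListWith (+) [(c, m * n) | (b, m) <- f a, (c, n) <- g b])
+
+ step1 :: Mult Char Int            -- two ways 'a' -> 1, one way 'a' -> 2
+ step1 'a' = [(1, 2), (2, 1)]
+ step1 _   = []
+
+ step2 :: Mult Int String
+ step2 1 = [("c", 3)]
+ step2 2 = [("c", 1), ("d", 5)]
+ step2 _ = []
+
+ main :: IO ()
+ main = print (compM step2 step1 'a')   -- [("c",7),("d",5)]
+ ```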
711
+
712
+ A Markov category $\mathbf{C}$ has a cartesian subcategory $\mathbf{C}_{\mathrm{det}}$ of deterministic morphisms. Given a small category $\mathbf{S}$ , the subcategory $[\mathbf{S},\mathbf{C}]_{\mathrm{det}}$ of the functor category $[\mathbf{S},\mathbf{C}]$ , which consists of functors of the form $\mathbf{S} \to \mathbf{C}_{\mathrm{det}} \hookrightarrow \mathbf{C}$ , is again a Markov category (Fritz, 2020, Section 7, notation slightly modified). The category $[\mathbf{S},\mathbf{C}]_{\mathrm{det}}$ contains deterministic diagrams of shape $\mathbf{S}$ and stochastic maps between them that preserve the shape.
713
+
714
+ The set of natural numbers can be considered a single-object category $(\ast, \mathbb{N}, +, 0)$ with the numbers as morphisms and the addition as the composition. The identity morphism $\mathrm{id}_{*}$ is the number 0.
715
+
716
+ Based on these, let us consider the category $[\mathbb{N},\mathbf{Set}_N]_{\mathrm{det}}$ . This category contains sets equipped with endofunctions indexed by natural numbers as objects and multiset functions between these sets that preserve their endofunctions as morphisms. A natural transformation to a constant functor (which maps all morphisms to the identity morphism) in this category means that no matter how the input changes with time, the count is invariant. An example is shown below:
717
+
718
+ ![](images/c5f0e19c1aa25f7e06829942d6b3d5c9627430dcef8a40c3e810c126c38f1aba.jpg)
719
+
720
+ If we want to characterize more complex behavior, we may simply change the source category $\mathbb{N}$ and define a proper category (possibly with a product structure) that encodes our requirements. The extension is left for future work.
721
+
722
+ # C. Proofs
723
+
724
+ Proposition 1.
725
+
726
+ $$
727
+ \begin{array}{ccccc} Y_1 & \xleftarrow{p_1} & Y_1 \times Y_2 & \xrightarrow{p_2} & Y_2 \\ m_{1,1} \Big\downarrow & & \Big\downarrow m & & \Big\downarrow m_{2,2} \\ Z_1 & \xleftarrow{p_1} & Z_1 \times Z_2 & \xrightarrow{p_2} & Z_2 \end{array} \tag{28}
728
+ $$
729
+
730
+ ![](images/a4ff70a5c4154ae16674a22ee7cac8c5f3629039bdbf0616d75a3631034770a3.jpg)
731
+
732
+ Proposition 2.
733
+
734
+ $$
735
+ \begin{array}{ccc} Y_1 & \xleftarrow[\cong]{p_1} & Y_1 \times 1 \\ \mathrm{id}_{Y_1} \Big\downarrow & & \Big\downarrow \mathrm{id}_{Y_1} \times y_2 \\ Y_1 & \xleftarrow{p_1} & Y_1 \times Y_2 \\ m_{1,1} \Big\downarrow & & \Big\downarrow m \\ Z_1 & \xleftarrow{p_1} & Z_1 \times Z_2 \end{array} \tag{29}
736
+ $$
737
+
738
+ ![](images/e0f24fc9419776ba67038b90c65d790d17ca65b925398b12222aad6b68bc846f.jpg)
739
+
740
+ Theorem 3 can be proven using the following lemma:
741
+
742
+ Lemma 10. Let $f: A \times B \to C$ be a morphism from a product and $\widehat{f}: B \to C^A$ its exponential transpose. Then, there exists a morphism $f': A \to C$ such that $f = f' \circ p_1$ if and only if the exponential transpose $\widehat{f}$ is a constant morphism.
743
+
744
+ Proof. Diagram chase:
745
+
746
+ ![](images/b0547a8eb99fe606472f1f88a7b2227f7b32c730382dc768b59057f8f4737b66.jpg)
747
+
748
+ We need to use the following commutative diagrams: (i) red: the universal property of the exponential object $C^A$ and the evaluation morphism $\epsilon_A$ ; (ii) green: the constant morphism $\widehat{f}$ , which factors through the terminal object 1 and defines the morphism $\widehat{f}'$ ; (iii) blue: the product morphism $\mathrm{id}_A \times \widehat{f}'$ ; and (iv) yellow: the definition of $f'$ .
749
+
750
+ It is easy to prove that $\widehat{f}:B\to C^A$ is a constant morphism if $f = f^{\prime}\circ p_{1}$ . Conversely, suppose $\widehat{f}:B\to C^{A}$ is a constant morphism, so it factors through the terminal object 1, say as $\widehat{f} = \widehat{f}^{\prime}\circ e_B$ , where $e_B: B \to 1$ is the unique morphism to the terminal object and $\widehat{f}':1\to C^A$ . We can define $f^{\prime}:A\to C$ as $\epsilon_{A}\circ (\mathrm{id}_{A}\times \widehat{f}^{\prime})$ . To prove $f = f^{\prime}\circ p_{1}$ , i.e., $f = \epsilon_{A}\circ (\mathrm{id}_{A}\times \widehat{f}^{\prime})\circ p_{1}$ , we only need to show $\mathrm{id}_A\times \widehat{f} = (\mathrm{id}_A\times \widehat{f}^\prime)\circ (\mathrm{id}_A\times e_B)$ . This triangle commutes because it is simply the product of the identity morphism $\mathrm{id}_A$ and the factorization of the constant morphism $\widehat{f}$ .
751
+
752
+ Alternatively, we can characterize product morphisms using pullbacks. Concretely, let $Y \times_{Y_i} Y$ be the pullback of the projection $p_i: Y \to Y_i$ along itself and $\pi_1, \pi_2: Y \times_{Y_i} Y \to Y$ be the pullback projections. In the category Set of sets, the pullback $Y \times_{Y_i} Y = \{(y, y') \in Y \times Y \mid y_i = y'_i\}$ is the set of pairs of factors whose $i$ -th components are the same. Then, $m$ is a product morphism if and only if $m_i \circ \pi_1 = m_i \circ \pi_2$ , i.e., $m_i(y_i, y_{\backslash i}) = m_i(y_i, y_{\backslash i}')$ . This can be proven using the following lemma:
753
+
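+ In the category Set this condition is directly checkable. The following minimal sketch (with a hypothetical $f$ ) tests whether $f$ agrees on the two pullback projections and, if so, constructs $f' = f \circ \langle \mathrm{id}_A, g \rangle$ for an arbitrary $g$ , mirroring the proof of the lemma below.
+
+ ```python
+ from itertools import product
+
+ A = ['a0', 'a1']
+ B = ['b0', 'b1', 'b2']
+
+ def f(a, b):
+     # A hypothetical morphism f : A x B -> C that happens to ignore b.
+     return len(a)
+
+ # Lemma 11 in Set: f factors through p1 iff f(a, b) == f(a, b') for all
+ # (a, b, b'), i.e. f equalizes the two pullback projections.
+ factors = all(f(a, b) == f(a, b2) for a, b, b2 in product(A, B, B))
+
+ if factors:
+     g = lambda a: B[0]              # any g : A -> B gives the same f'
+     f_prime = lambda a: f(a, g(a))  # f' = f . <id_A, g>
+     assert all(f(a, b) == f_prime(a) for a, b in product(A, B))
+     print("f = f' . p1")
+ ```
+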
754
+ Lemma 11. Let $f: A \times B \to C$ be a morphism from a product and $(A \times B) \times_A (A \times B)$ be the pullback of the projection $p_1: A \times B \to A$ along itself, with two pullback projections $\pi_1, \pi_2: (A \times B) \times_A (A \times B) \to A \times B$ . Then, there exists a morphism $f': A \to C$ such that $f = f' \circ p_1$ if and only if $f \circ \pi_1 = f \circ \pi_2$ .
755
+
756
+ Proof. Diagram chase:
757
+
758
+ ![](images/7711375a3b81b2036e6e86db53a93dd0c32686a31d783df7395b3c63b1c3f53e.jpg)
759
+
760
+ Suppose that $f = f' \circ p_1$ . Because the pullback rectangle commutes, $p_1 \circ \pi_1 = p_1 \circ \pi_2$ , it is easy to show that $f \circ \pi_1 = f' \circ p_1 \circ \pi_1 = f' \circ p_1 \circ \pi_2 = f \circ \pi_2$ .
761
+
762
+ Now suppose that $f \circ \pi_1 = f \circ \pi_2$ . We define $f': A \to C$ as $f \circ \langle \mathrm{id}_A, g \rangle$ for an arbitrary morphism $g: A \to B$ . To prove $f = f' \circ p_1$ , we can consider two morphisms $\operatorname{id}_{A \times B}$ and $\langle \mathrm{id}_A, g \rangle \circ p_1$ . Because they complete the commutative diagram of the pullback $(A \times B) \times_A (A \times B)$ , $p_1 \circ \operatorname{id}_{A \times B} = p_1 \circ \langle \mathrm{id}_A, g \rangle \circ p_1 = p_1$ , there exists a unique morphism $u: A \times B \to (A \times B) \times_A (A \times B)$ such that $\pi_1 \circ u = \operatorname{id}_{A \times B}$ and $\pi_2 \circ u = \langle \mathrm{id}_A, g \rangle \circ p_1$ . We can now chase the diagram to show that $f = f \circ \operatorname{id}_{A \times B} = f \circ \pi_1 \circ u = f \circ \pi_2 \circ u = f \circ \langle \mathrm{id}_A, g \rangle \circ p_1 = f' \circ p_1$ .
763
+
764
+ To prove that this construction does not depend on the specific choice of $g: A \to B$ , let us consider two morphisms $g, g': A \to B$ . Because $\langle \mathrm{id}_A, g \rangle$ and $\langle \mathrm{id}_A, g' \rangle$ complete the commutative diagram of the pullback, there exists a unique morphism $v: A \to (A \times B) \times_A (A \times B)$ such that $\pi_1 \circ v = \langle \mathrm{id}_A, g' \rangle$ and $\pi_2 \circ v = \langle \mathrm{id}_A, g \rangle$ . Then, $f \circ \langle \mathrm{id}_A, g \rangle = f \circ \pi_2 \circ v = f \circ \pi_1 \circ v = f \circ \langle \mathrm{id}_A, g' \rangle$ , which shows that $f' = f \circ \langle \mathrm{id}_A, g \rangle$ is independent of the choice of $g: A \to B$ .
767
+
768
+ Based on this, we can obtain the following diagram:
769
+
770
+ ![](images/ca823c3c70b8a32c061f6378153446970ac32ca0354b2590c82536e4ec90f0ec.jpg)
771
+
772
+ Both Lemmas 10 and 11 show that there are alternative ways to characterize "invariance" without a group-theoretical formulation.
773
+
774
+ Theorem 4.
775
+
776
+ $$
777
+ \begin{array}{ccc} Y_1 & \xleftarrow{p_1} & Y_1 \times Y_2 \\ \mathrm{id}_{Y_1} \Big\downarrow & & \Big\downarrow h \\ Y_1 & \xleftarrow{p_1} & Y_1 \times Y_2 \\ m_{1,1} \Big\downarrow & & \Big\downarrow m \\ Z_1 & \xleftarrow{p_1} & Z_1 \times Z_2 \\ \mathrm{id}_{Z_1} \Big\uparrow & & \Big\uparrow \mathrm{id}_{Z_1} \times z_2 \\ Z_1 & \xleftarrow[\cong]{p_1} & Z_1 \times 1 \end{array} \tag{33}
778
+ $$
779
+
780
+ ![](images/63377bc2c00700d19b53fb24eca61905cd5e8093cf609c21b9e70441086e0b5b.jpg)
781
+
782
+ Proposition 5. Let $F, G: \mathbf{C} \to \mathbf{D}$ be product-preserving functors.
783
+
784
+ ![](images/9e39d839bb7fc2ba1cab7d35a6e8c4d69308aeaa74ba755cd4b7b022a45eb94e.jpg)
785
+
786
+ Theorem 6. Let $F, G: \mathbf{C} \to \mathbf{D}$ be functors and $\alpha: F \Rightarrow G$ a natural transformation.
787
+
788
+ $$
789
+ \begin{array}{ccc} FA & \xrightarrow{\alpha_A} & GA \\ Fp,\,Fq \Big\downarrow & & \Big\downarrow Gp,\,Gq \\ FB & \xrightarrow{\alpha_B} & GB \end{array} \tag{35}
790
+ $$
791
+
792
+ We have the following reasoning:
793
+
794
+ - $F$ is not faithful: $\exists p \neq q$ . $Fp = Fq$
795
+ - $\alpha$ is natural: $Fp = Fq \to Gp \circ \alpha_{A} = Gq \circ \alpha_{A}$
796
+ - $\alpha$ is epic: $Gp\circ \alpha_{A} = Gq\circ \alpha_{A}\to Gp = Gq$
797
+
798
+ Then,
799
+
800
+ $$F \text{ is not faithful} \wedge \alpha \text{ is epic} \rightarrow G \text{ is not faithful.} \tag{36}$$
801
+
802
+ Or equivalently,
803
+
804
+ $$\alpha \text{ is epic} \rightarrow (G \text{ is faithful} \rightarrow F \text{ is faithful}). \tag{37}$$
805
+
806
+ Similarly,
807
+
808
+ - $G$ is not faithful: $\exists p \neq q$ . $Gp = Gq$
809
+ - $\alpha$ is natural: $Gp = Gq \to \alpha_{B} \circ Fp = \alpha_{B} \circ Fq$
810
+ - $\alpha$ is monic: $\alpha_{B} \circ Fp = \alpha_{B} \circ Fq \to Fp = Fq$
811
+
812
+ Then,
813
+
814
+ $$G \text{ is not faithful} \wedge \alpha \text{ is monic} \rightarrow F \text{ is not faithful.} \tag{38}$$
815
+
816
+ Or equivalently,
817
+
818
+ $$\alpha \text{ is monic} \rightarrow (F \text{ is faithful} \rightarrow G \text{ is faithful}). \tag{39}$$
819
+
820
+ ![](images/786232673e663cbabd78683f800470a9a96e1a38a5254a488bb001704ab21efa.jpg)
821
+
822
+ Theorem 8. When $N = 2$ , D. 6.2 reduces to the definition of D. 6.1 (Fritz, 2020, Lemma 12.11). When $N > 2$ , we can apply this equation recursively.
823
+
824
+ ![](images/ed1fa07dd631aed2d55183ba6b36e8d0c1b8087c12068cab2440df207b7cf33f.jpg)
825
+
826
+ ![](images/8fbd4d4eab140dd57d577107fc7343b952e4c64a4adc484168f60e34ff332c76.jpg)
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00a947e9951b0191e8049cde99d8d5cee220c1cdae2970ed39614cf4535c4d04
3
+ size 441905
acategorytheoreticalmetaanalysisofdefinitionsofdisentanglement/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3ecc6b84a16b6b16da98a53c8ca6513407cc392621863629cfc6b533d79ee35
3
+ size 1050085
acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4eb97ffe2ee7892ebdb7c731299476d6bedc24bc6962b705d6abbef910b39401
3
+ size 171346
acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a94f5a5cca098c249d9430bf382b434a0a4a0514ee54854b08c3db611b3fc20
3
+ size 194887
acloserlookatfewshotclassificationagain/bcec5be9-469f-4be1-a990-fdf6de674987_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ccf514927bf315a65fb7c997be9c987cd53f244055b9c31738cb7a82fdc8a0d4
3
+ size 985375
acloserlookatfewshotclassificationagain/full.md ADDED
The diff for this file is too large to render. See raw diff
 
acloserlookatfewshotclassificationagain/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6df45a7dcfee3144f71360bcd11cfd5b85f8cd09a355b2de990648f28ef90df0
3
+ size 1963392
acloserlookatfewshotclassificationagain/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce703cf17f3a2b4c533b568eab89135354ad0490265f4f90d4f3b510f4440ebb
3
+ size 763955
acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7642ee0c9fbe4c214b9caf897f6128b3e9ebd7a29609dac2f892a4e2b884385c
3
+ size 127459
acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10850bfcb27adb5c695c29c3f47e1ee4ab52c43b4fc66a6bd707d702657f7e02
3
+ size 151140
acloserlookatselfsupervisedlightweightvisiontransformers/7e5a55d5-3963-44b6-830b-97d8f7478c51_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:baeadcb53017c1a8c98b71cf1da59f1c3a6e1400e8f228fcd872db6498d2ee0a
3
+ size 3634900
acloserlookatselfsupervisedlightweightvisiontransformers/full.md ADDED
@@ -0,0 +1,495 @@
1
+ # A Closer Look at Self-Supervised Lightweight Vision Transformers
2
+
3
+ Shaoru Wang $^{12}$ Jin Gao $^{12}$ Zeming Li $^{3}$ Xiaoqin Zhang $^{4}$ Weiming Hu $^{125}$
4
+
5
+ # Abstract
6
+
7
+ Self-supervised learning on large-scale Vision Transformers (ViTs) as pre-training methods has achieved promising downstream performance. Yet, how much these pre-training paradigms promote lightweight ViTs' performance is considerably less studied. In this work, we develop and benchmark several self-supervised pre-training methods on image classification tasks and some downstream dense prediction tasks. We surprisingly find that if proper pre-training is adopted, even vanilla lightweight ViTs show comparable performance to previous SOTA networks with delicate architecture design. It breaks the recently popular conception that vanilla ViTs are not suitable for vision tasks in lightweight regimes. We also point out some defects of such pre-training, e.g., failing to benefit from large-scale pre-training data and showing inferior performance on data-insufficient downstream tasks. Furthermore, we analyze and clearly show the effect of such pre-training by analyzing the properties of the layer representation and attention maps for related models. Finally, based on the above analyses, a distillation strategy during pre-training is developed, which leads to further downstream performance improvement for MAE-based pre-training. Code is available at https://github.com/wangsr126/mae-lite.
8
+
9
+ # 1. Introduction
10
+
11
+ Self-supervised learning (SSL) has shown great progress in representation learning without heavy reliance on expensive labeled data.
12
+
13
+ $^{1}$ State Key Laboratory of Multimodal Artificial Intelligence Systems, Institute of Automation, Chinese Academy of Sciences $^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences $^{3}$ Megvii Research $^{4}$ Key Laboratory of Intelligent Informatics for Safety & Emergency of Zhejiang Province, Wenzhou University $^{5}$ School of Information Science and Technology, ShanghaiTech University. Correspondence to: Jin Gao <jin.gao@nlpr.ia.ac.cn>.
14
+
15
+ Proceedings of the $40^{th}$ International Conference on Machine Learning, Honolulu, Hawaii, USA. PMLR 202, 2023. Copyright 2023 by the author(s).
16
+
17
+ SSL focuses on various pretext tasks for pre-training. Among them, several works (He et al., 2020; Chen et al., 2020; Grill et al., 2020; Caron et al., 2020; Chen et al., 2021a; Caron et al., 2021) based on contrastive learning (CL) have achieved comparable or even better accuracy than supervised pre-training when transferring the learned representations to downstream tasks. Recently, another trend focuses on masked image modeling (MIM) (Bao et al., 2021; He et al., 2021; Zhou et al., 2022), which perfectly fits Vision Transformers (ViTs) (Dosovitskiy et al., 2020) for vision tasks, and achieves improved generalization performance. Most of these works, however, involve large networks with little attention paid to smaller ones. Some works (Fang et al., 2020; Abbasi Koohpayegani et al., 2020; Choi et al., 2021) focus on CL on small convolutional networks (ConvNets) and improve the performance by distillation. However, the pre-training of lightweight ViTs is considerably less studied.
18
+
19
+ Efficient neural networks are essential for modern on-device computer vision. Recent studies on achieving top-performing lightweight models mainly focus on designing network architectures (Sandler et al., 2018; Howard et al., 2019; Graham et al., 2021; Ali et al., 2021; Heo et al., 2021; Touvron et al., 2021b; Mehta & Rastegari, 2022; Chen et al., 2021b; Pan et al., 2022), while little attention is paid to how to optimize the training strategies for these models. We believe the latter is also of vital importance, and the utilization of pre-training is one of the most promising approaches in this direction, since it has achieved great progress on large models. To this end, we develop and benchmark recently popular self-supervised pre-training methods, e.g., CL-based MoCo-v3 (Chen et al., 2021a) and MIM-based MAE (He et al., 2021), along with fully-supervised pre-training for lightweight ViTs as the baselines, on ImageNet and other classification tasks, as well as some dense prediction tasks, e.g., object detection and segmentation. We surprisingly find that if proper pre-training is adopted, even vanilla lightweight ViTs show comparable performance to previous SOTA networks with delicate design, e.g., we achieve $79.0\%$ top-1 accuracy on ImageNet with vanilla ViT-Tiny (5.7M). The finding is intriguing since it indicates that proper pre-training can bridge the performance gap between naive network architectures and delicately designed ones to a great extent, while naive architectures usually have faster inference speed by getting rid of some complicated operators.
20
+
21
+ We also point out some defects of such pre-training, e.g., failing to benefit from large-scale pre-training data and showing inferior performance on data-insufficient downstream tasks.
22
+
23
+ These findings motivate us to dive deep into the working mechanism of these pre-training methods for lightweight ViTs. More specifically, we introduce a variety of model analysis methods to study the pattern of layer behaviors during pre-training and fine-tuning, and investigate what really matters for downstream performance. First, we find that lower layers of the pre-trained models matter more than higher ones if sufficient downstream data is provided, while higher layers matter in data-insufficient downstream tasks. Second, we observe that the pre-training with MAE makes the attention of the downstream models more local and concentrated, i.e., introduces locality inductive bias, which may be the key to the performance gain. Based on the above analyses, we also develop a distillation strategy for MAE-based pre-training, which significantly improves the pre-training of lightweight ViTs. Better downstream performance is achieved especially on data-insufficient classification tasks and detection tasks.
24
+
25
+ # 2. Preliminaries and Experimental Setup
26
+
27
+ ViTs. We use ViT-Tiny (Touvron et al., 2021a), which contains 5.7M parameters, in our study to examine the effect of the pre-training on downstream performance. We adopt the vanilla architecture, consisting of a patch embedding layer and 12 Transformer blocks with an embedding dimension of 192, except that the number of heads is increased to 12, as we find this improves the model's expressive power; a construction sketch is given below. ViT-Tiny is chosen for study because it is an ideal experimental object, on which almost all existing pre-training methods can be directly applied. It also has a rather naive architecture: non-hierarchical, with little human inductive bias in its design. Thus, the influence of the model architecture design on our analyses can be eliminated to a great extent.
28
+
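+ For concreteness, such a model can be instantiated roughly as follows with the timm library (a minimal architecture-only sketch; the training recipes are described in Appendix A.1):
+
+ ```python
+ from timm.models.vision_transformer import VisionTransformer
+
+ # Vanilla ViT-Tiny: a patch embedding layer plus 12 Transformer blocks with
+ # embedding dimension 192; we widen the attention to 12 heads (head dim 16)
+ # instead of the 3 heads of the original DeiT-Tiny.
+ model = VisionTransformer(
+     img_size=224, patch_size=16,
+     embed_dim=192, depth=12, num_heads=12,
+     num_classes=1000,
+ )
+ print(sum(p.numel() for p in model.parameters()) / 1e6, "M parameters")  # ~5.7
+ ```
+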
29
+ Evaluation Metrics. We adopt fine-tuning as the default evaluation protocol considering that it is highly correlated with utility (Newell & Deng, 2020), in which all the layers are tuned by initializing them with the pre-trained models. By default, we do the evaluation on ImageNet (Deng et al., 2009) by fine-tuning on the training set and evaluating on the validation set. Several other downstream classification datasets (e.g., Flowers (Nilsback & Zisserman, 2008), Aircraft (Maji et al., 2013), CIFAR100 (Krizhevsky et al., 2009), etc.) and object detection and segmentation tasks on COCO (Lin et al., 2014) are also exploited for comparison. For a more thorough study, analyses based on linear probing evaluation are presented in Appendix B.2.
30
+
31
+ Compared Methods. Baseline: We supervisedly train a ViT-Tiny from scratch for 300 epochs on the training set of ImageNet-1k (dubbed IN1K). It achieves $74.5\%$ top-1 accuracy on the validation set of ImageNet-1k, surpassing the original architecture ( $72.2\%$ (Touvron et al., 2021a)) by modifying the number of heads from 3 to 12, and further reaches $75.8\%$ with our improved training recipe (see Appendix A.1), which finally serves as our strong baseline for examining the pre-training. We denote this supervisedly trained model as DeiT-Tiny.
32
+
33
+ MAE: MAE (He et al., 2021) is selected as a representative for MIM-based pre-training methods, which has a simple framework with low training cost. We largely follow the design of MAE except that the encoder is altered to ViT-Tiny. Several basic factors and components are adjusted to fit the smaller encoder (see Appendix A.2). By default, we do pre-training on IN1K for 400 epochs, and denote the pre-trained model as MAE-Tiny.
34
+
35
+ MoCov3: We also implement a contrastive SSL pre-training counterpart, MoCo-v3 (Chen et al., 2021a), which is selected for its simplicity. We also do 400-epoch pre-training and denote the pre-trained model as MoCov3-Tiny. Details are provided in Appendix A.3.
36
+
37
+ Some other methods, e.g., MIM-based SimMIM (Xie et al., 2022) and CL-based DINO (Caron et al., 2021) are also involved, but are moved to Appendix B.3 due to space limitation.
38
+
39
+ # 3. How Well Does Pre-Training Work on Lightweight ViTs?
40
+
41
+ In this section, we first benchmark the aforementioned pretrained models on ImageNet, and then further evaluate their transferability to other datasets and tasks.
42
+
43
+ # 3.1. Benchmarks on ImageNet Classification Tasks
44
+
45
+ Which pre-training method performs best? We first develop and benchmark the pre-training methods on ImageNet, involving the baseline that does not adopt any pre-training, supervised pre-training on the training set of ImageNet-21k (a bigger and more diverse dataset, roughly ten times the size of IN1K, dubbed IN21K), and the aforementioned self-supervised pre-training with MoCo-v3 and MAE. As reported in Tab. 1, most of these supervised and self-supervised pre-training methods improve the downstream performance, while MAE outperforms the others at a moderate training cost. The results indicate that the vanilla ViTs have great potential, which can be unleashed via proper pre-training. It encourages us to further explore how the enhanced ViTs perform compared to recent SOTA ConvNets and ViT derivatives.
46
+
47
+ Table 1. Comparisons on pre-training methods. We report top-1 accuracy on the validation set of ImageNet-1k. IN1K and IN21K indicate the training sets of ImageNet-1k and ImageNet-21k. The pre-training time is measured on an $8 \times \mathrm{V100}$ GPU machine. 'ori.' represents the supervised training recipe from Touvron et al. (2021a) and 'impr.' represents our improved recipe (see Appendix A.1).
48
+
49
+ <table><tr><td rowspan="2">Methods</td><td rowspan="2">Pre-training Data</td><td rowspan="2">Epochs</td><td rowspan="2">Time (hour)</td><td colspan="2">Fine-tuning</td></tr><tr><td>recipe</td><td>Top-1 Acc. (%)</td></tr><tr><td>-</td><td>-</td><td>-</td><td>-</td><td>ori.</td><td>74.5</td></tr><tr><td>-</td><td>-</td><td>-</td><td>-</td><td>impr.</td><td>75.8</td></tr><tr><td>Supervised (Steiner et al., 2021)</td><td>IN21K w/ labels</td><td>30</td><td>20</td><td>impr.</td><td>76.9</td></tr><tr><td>Supervised (Steiner et al., 2021)</td><td>IN21K w/ labels</td><td>300</td><td>200</td><td>impr.</td><td>77.8</td></tr><tr><td>MoCo-v3 (Chen et al., 2021a)</td><td>IN1K w/o labels</td><td>400</td><td>52</td><td>impr.</td><td>76.8†</td></tr><tr><td>MAE (He et al., 2021)</td><td>IN1K w/o labels</td><td>400</td><td>23</td><td>impr.</td><td>78.0</td></tr></table>
50
+
51
+ $\dagger$ Global average pooling is used instead of the default configuration based on the class token during the fine-tuning. See Appendix A.1 for details.
52
+
53
+ How do the enhanced ViTs with pre-training rank among SOTA lightweight networks? To answer the question, we further compare the enhanced ViT-Tiny with MAE pre-training to previous lightweight ConvNets and ViT derivatives. We report top-1 accuracy along with the model parameter count and the throughput in Tab. 3. We denote the fine-tuned model based on MAE-Tiny as MAETiny-FT. Specifically, we extend the fine-tuning epochs to 1000 following Touvron et al. (2021a) and adopt relative position embedding. Under this strong fine-tuning recipe, the pre-training still contributes a 1.2 performance gain, ultimately reaching $79.0\%$ top-1 accuracy. It sets a new record for lightweight vanilla ViTs, even without distillation during the supervised training phase on IN1K. It can also be seen that the pre-training can accelerate the downstream convergence, which helps to surpass that trained from scratch for 1000 epochs $(77.8\%)$ with only 300-epoch fine-tuning $(78.5\%)$ .
54
+
55
+ We conclude that the enhanced ViT-Tiny is on par with or even outperforms most previous ConvNets and ViT derivatives with comparable parameters or throughput. This demonstrates that we can also achieve SOTA performance based on a naive network architecture by adopting proper pre-training, rather than designing complex ones. Significantly, naive architecture usually has faster inference speed and is friendly to deployment.
56
+
57
+ We also notice that there are some works applying supervised pre-training (Ridnik et al., 2021), CL-based self-supervised pre-training (Fang et al., 2020), and MIM-based self-supervised pre-training (Woo et al., 2023) to lightweight ConvNets. However, we find that ViT-Tiny benefits more from the pre-training (e.g., +1.2 vs. +0.5 for ConvNeXt V2-F). We attribute this to the plain architecture of ViT-Tiny: with less hand-crafted design, it may possess more model capacity.
58
+
59
+ Can the pre-training benefit from more data? One may be curious about whether it is possible to achieve better downstream performance by involving more pre-training data, as it does on large models. Unfortunately, the answer is no for the examined pre-training methods.
60
+
61
+ Table 2. Effect of pre-training data. Top-1 accuracy is reported.
62
+
63
+ <table><tr><td>Datasets</td><td>MoCo-v3</td><td>MAE</td></tr><tr><td>IN1K</td><td>76.8</td><td>78.0</td></tr><tr><td>1% IN1K</td><td>76.2 (-0.6)</td><td>77.9 (-0.1)</td></tr><tr><td>10% IN1K</td><td>76.5 (-0.3)</td><td>78.0 (+0.0)</td></tr><tr><td>IN1K-LT</td><td>76.1 (-0.7)</td><td>77.9 (-0.1)</td></tr><tr><td>IN21K</td><td>76.9 (+0.1)</td><td>78.0 (+0.0)</td></tr></table>
64
+
65
+ We consider IN21K, a much larger dataset. The number of pre-training iterations is kept constant for a fair comparison. However, few improvements are observed for both MoCo-v3 and MAE, as shown in Tab. 2. We further consider two subsets of IN1K containing $1\%$ and $10\%$ of the total examples ( $1\%$ IN1K and $10\%$ IN1K) balanced in terms of classes (Assran et al., 2021) and one subset with a long-tailed class distribution (Liu et al., 2019) (IN1K-LT). Surprisingly, only marginal performance declines are observed for MAE when pre-training on these subsets, showing more robustness than MoCo-v3 with respect to the pre-training data scale and class distribution.
66
+
67
+ # 3.2. Benchmarks on Transfer Performance
68
+
69
+ We further examine the transferability of these models pretrained on IN1K, involving their transfer performance on some other classification tasks and dense prediction tasks. In addition to the self-supervised MAE-Tiny and MoCov3-Tiny, DeiT-Tiny is also involved, as a fully-supervised counterpart which is trained on IN1K for 300 epochs.
70
+
71
+ Can the pre-trained models transfer well on data-insufficient tasks? We introduce several classification tasks (Nilsback & Zisserman, 2008; Parkhi et al., 2012; Maji et al., 2013; Krause et al., 2013; Krizhevsky et al., 2009; Van Horn et al., 2018) to investigate their transferability. We conduct the transfer evaluation by fine-tuning these pre-trained models on these datasets (see Appendix A.4 for more details). As shown in Tab. 4, using various pre-training methods shows better performance than using random initialization, but the relative ranking of these pre-training methods differs markedly from that on ImageNet. We find that the downstream data scale matters.
72
+
73
+ Table 3. Comparisons with previous SOTA networks on ImageNet-1k. We report top-1 accuracy along with throughput and parameter count. The throughput is borrowed from timm (Wightman, 2019), which is measured on a single RTX 3090 GPU with a batch size fixed to 1024 and mixed precision. $\dagger$ indicates that distillation is adopted during the supervised training (or fine-tuning). * indicates the original architecture of ViT-Tiny (the number of attention heads is 3).
74
+
75
+ <table><tr><td>Methods</td><td>pre-train data</td><td>fine-tuning epochs</td><td>#param.</td><td>throughput (image/s)</td><td>Accuracy Top-1 (%)</td></tr><tr><td colspan="6">ConvNets</td></tr><tr><td>ResNet-18 (He et al., 2016)</td><td>-</td><td>100</td><td>11.7M</td><td>8951</td><td>69.7</td></tr><tr><td>ResNet-50 (He et al., 2016; Wightman et al., 2021)</td><td>-</td><td>600</td><td>25.6M</td><td>2696</td><td>80.4</td></tr><tr><td>EfficientNet-B0 (Tan &amp; Le, 2019)</td><td>-</td><td>450</td><td>5.3M</td><td>5369</td><td>77.7</td></tr><tr><td>EfficientNet-B0 (Fang et al., 2020)</td><td>IN1K w/o labels</td><td>450</td><td>5.3M</td><td>5369</td><td>77.2 (-0.5)</td></tr><tr><td>EfficientNet-B1 (Tan &amp; Le, 2019)</td><td>-</td><td>450</td><td>7.8M</td><td>2953</td><td>78.8</td></tr><tr><td>MobileNet-v2 (Sandler et al., 2018)</td><td>-</td><td>480</td><td>3.5M</td><td>7909</td><td>72.0</td></tr><tr><td>MobileNet-v3 (Howard et al., 2019)</td><td>-</td><td>600</td><td>5.5M</td><td>9113</td><td>75.2</td></tr><tr><td>MobileNet-v3†(Ridnik et al., 2021)</td><td>IN21K</td><td>600</td><td>5.5M</td><td>9113</td><td>78.0</td></tr><tr><td>ConvNeXt V1-F (Liu et al., 2022)</td><td>-</td><td>600</td><td>5.2M</td><td>-</td><td>77.5</td></tr><tr><td>ConvNeXt V2-F (Woo et al., 2023)</td><td>-</td><td>600</td><td>5.2M</td><td>1816</td><td>78.0</td></tr><tr><td>ConvNeXt V2-F (Woo et al., 2023)</td><td>IN1K w/o labels</td><td>600</td><td>5.2M</td><td>1816</td><td>78.5 (+0.5)</td></tr><tr><td colspan="6">Vision Transformers Derivative</td></tr><tr><td>LeViT-128 (Graham et al., 2021)</td><td>-</td><td>1000</td><td>9.2M</td><td>13276</td><td>78.6</td></tr><tr><td>LeViT-192 (Graham et al., 2021)</td><td>-</td><td>1000</td><td>11.0M</td><td>11389</td><td>80.0</td></tr><tr><td>XCiT-T12/16†(Ali et al., 2021)</td><td>-</td><td>400</td><td>6.7M</td><td>3157</td><td>78.6</td></tr><tr><td>PiT-Ti†(Heo et al., 2021)</td><td>-</td><td>1000</td><td>5.1M</td><td>4547</td><td>76.4</td></tr><tr><td>CaiT-XXS-24†(Touvron et al., 2021b)</td><td>-</td><td>400</td><td>12.0M</td><td>1351</td><td>78.4</td></tr><tr><td>Swin-1G (Liu et al., 2021; Chen et al., 2021b)</td><td>-</td><td>450</td><td>7.3M</td><td>-</td><td>77.3</td></tr><tr><td>Mobile-Former-294M (Chen et al., 2021b)</td><td>-</td><td>450</td><td>11.4M</td><td>-</td><td>77.9</td></tr><tr><td>MobileViT-S (Mehta &amp; Rastegari, 2022)</td><td>-</td><td>300</td><td>5.6M</td><td>1900</td><td>78.3</td></tr><tr><td>EdgeViT-XS (Pan et al., 2022)</td><td>-</td><td>300</td><td>6.7M</td><td>-</td><td>77.5</td></tr><tr><td colspan="6">Vanilla Vision Transformers</td></tr><tr><td>DeiT-Tiny* (Touvron et al., 2021a)</td><td>-</td><td>300</td><td>5.7M</td><td>4844</td><td>72.2</td></tr><tr><td>DeiT-Tiny*†(Touvron et al., 2021a)</td><td>-</td><td>1000</td><td>5.7M</td><td>4764</td><td>76.6</td></tr><tr><td>DeiT-Tiny</td><td>-</td><td>300</td><td>5.7M</td><td>4020</td><td>76.2</td></tr><tr><td>MAE-Tiny-FT</td><td>IN1K w/o labels</td><td>300</td><td>5.7M</td><td>4020</td><td>78.5 (+2.3)</td></tr><tr><td>DeiT-Tiny</td><td>-</td><td>1000</td><td>5.7M</td><td>4020</td><td>77.8</td></tr><tr><td>MAE-Tiny-FT</td><td>IN1K w/o labels</td><td>1000</td><td>5.7M</td><td>4020</td><td>79.0 (+1.2)</td></tr></table>
76
+
77
+ Table 4. Transfer evaluation on classification tasks and dense-prediction tasks. Self-supervised pre-training approaches generally show inferior performance to the fully-supervised counterpart. Top-1 accuracy is reported for classification tasks and AP is reported for object detection (det.) and instance segmentation (seg.) tasks. The description of each dataset is represented as (train-size/test-size/#classes).
78
+
79
+ <table><tr><td>Datasets Init.</td><td>Flowers (2k/6k/102)</td><td>Pets (4k/4k/37)</td><td>Aircraft (7k/3k/100)</td><td>Cars (8k/8k/196)</td><td>CIFAR100 (50k/10k/100)</td><td>iNat18 (438k/24k/8142)</td><td>COCO(det.) (118k/50k/80)</td><td>COCO(seg.)</td></tr><tr><td>Random</td><td>30.2</td><td>26.1</td><td>9.4</td><td>6.8</td><td>42.7</td><td>58.7</td><td>32.7</td><td>28.9</td></tr><tr><td>supervised DeiT-Tiny</td><td>96.4</td><td>93.1</td><td>73.5</td><td>85.6</td><td>85.8</td><td>63.6</td><td>40.4</td><td>35.5</td></tr><tr><td>self-supervised MoCov3-Tiny</td><td>94.8</td><td>87.8</td><td>73.7</td><td>83.9</td><td>83.9</td><td>54.5</td><td>39.7</td><td>35.1</td></tr><tr><td>MAE-Tiny</td><td>85.8</td><td>76.5</td><td>64.6</td><td>78.8</td><td>78.9</td><td>60.6</td><td>39.9</td><td>35.4</td></tr></table>
80
+
81
+ The self-supervised pre-training approaches achieve downstream performance far behind the fully-supervised counterpart, while the performance gap is narrowed more or less as the data scale of the downstream task increases.
82
+
83
+ Moreover, MAE even shows inferior results to MoCo-v3. We conjecture that this is due to their different layer behaviors during pre-training and fine-tuning, which will be discussed in detail in the following section.
84
+
85
+ Can the pre-trained models transfer well on dense prediction tasks? For a more thorough study, we further conduct evaluations on downstream object detection and segmentation tasks on COCO (Lin et al., 2014), based on Li et al. (2021) (see Appendix A.5 for details) with different pre-trained models as initialization of the backbone. The results are shown in Tab. 4. The self-supervised pre-training also lags behind the fully-supervised counterpart.
86
+
87
+ # 4. Revealing the Secrets of the Pre-Training
88
+
89
+ In this section, we introduce some model analysis methods to study the pattern of layer behaviors during pre-training and fine-tuning, and investigate what matters for downstream performance.
90
+
91
+ # 4.1. Layer Representation Analyses
92
+
93
+ We first adopt the Centered Kernel Alignment (CKA) method (Cortes et al., 2012; Nguyen et al., 2020) to analyze the layer representation similarity across and within networks. Specifically, CKA computes the normalized similarity in terms of the Hilbert-Schmidt Independence Criterion (HSIC (Song et al., 2012)) between two feature maps or representations, which is invariant to the orthogonal transformation of representations and isotropic scaling (detailed in Appendix A.6).
94
+
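+ For reference, the following is a minimal sketch of the commonly used linear variant of CKA (the exact kernel and estimator choices behind our analyses are detailed in Appendix A.6; this sketch is illustrative rather than the analysis code):
+
+ ```python
+ import numpy as np
+
+ def linear_cka(X, Y):
+     """Linear CKA between representations X (n x d1) and Y (n x d2)
+     computed on the same n inputs; invariant to orthogonal transforms
+     and isotropic scaling of either representation."""
+     X = X - X.mean(axis=0)  # center the features
+     Y = Y - Y.mean(axis=0)
+     hsic = np.linalg.norm(Y.T @ X, 'fro') ** 2
+     return hsic / (np.linalg.norm(X.T @ X, 'fro') * np.linalg.norm(Y.T @ Y, 'fro'))
+
+ # e.g. features of two layers on the same batch of inputs:
+ X = np.random.randn(256, 192)
+ Y = np.random.randn(256, 192)
+ print(linear_cka(X, X), linear_cka(X, Y))  # 1.0, and a small value
+ ```
+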
95
+ Lower layers matter more than higher ones if sufficient downstream data is provided. We visualize the layer representation similarity between several pre-trained models and DeiT-Tiny as heatmaps in Fig. 1. We choose DeiT-Tiny, a classification model fully-supervisedly trained on IN1K, as the reference because we consider the higher similarity of the examined model's layer to that of DeiT-Tiny indicates its more relevance to recognition. Although the similarity does not directly indicate whether the downstream performance is good, it indeed reflects the pattern of layer representation to a certain extent. The similarity within DeiT-Tiny is also presented (the left column).
96
+
97
+ First, we observe a relatively high similarity between MAE-Tiny and DeiT-Tiny for lower layers, while low similarity for higher layers. In Appendix B.1, we observe similar phenomena with several additional supervisedly trained ViTs as the reference models. This indicates that fewer semantics are extracted by MAE-Tiny at a more abstract level in higher layers. In contrast, MoCov3-Tiny aligns with DeiT-Tiny well across almost all layers. However, the fine-tuning evaluation in Tab. 1 shows that adopting MAE-Tiny as initialization improves the performance more significantly than MoCov3-Tiny. Thus, we hypothesize that lower layers matter much more than higher ones for the pre-trained models. In order to verify this hypothesis, we design another experiment, described next.
98
+
99
+ We reserve only several leading blocks of the pre-trained models and randomly initialize the others, and then fine-tune them on IN1K (for the sake of simplicity, we only fine-tune these models for 100 epochs); a minimal sketch of this partial initialization follows. Fig. 2 shows that reserving only a certain number of leading blocks achieves a significant performance gain over randomly initializing all the blocks (i.e., training entirely from scratch) for both MAE-Tiny and MoCov3-Tiny. However, further reserving higher layers leads to only marginal gains for MAE-Tiny and MoCov3-Tiny, which supports our hypothesis.
100
+
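+ A minimal PyTorch sketch of this partial initialization (parameter names follow timm's ViT convention, e.g., `blocks.0.`, and are assumptions; the checkpoint is assumed to hold a flat state dict):
+
+ ```python
+ import torch
+
+ def load_leading_blocks(model, checkpoint_path, k):
+     """Initialize only the patch embedding and the first k Transformer
+     blocks from a pre-trained state dict; deeper blocks keep their
+     random initialization."""
+     state = torch.load(checkpoint_path, map_location='cpu')
+
+     def keep(name):
+         if name.startswith('patch_embed'):
+             return True
+         if name.startswith('blocks.'):
+             return int(name.split('.')[1]) < k
+         return False
+
+     reserved = {n: v for n, v in state.items() if keep(n)}
+     model.load_state_dict(reserved, strict=False)  # the rest stays random
+     return model
+ ```
+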
101
+ Higher layers matter in data-insufficient downstream tasks. Previous works (Touvron et al., 2021a; Raghu et al., 2021) demonstrate the importance of a relatively large dataset scale for fully-supervised high-performance ViTs with large model sizes. We also observe a similar phenomenon on lightweight ViTs even when the self-supervised pre-training is adopted as discussed in Sec. 3.2. It motivates us to study the key factor of downstream performance on data-insufficient tasks.
102
+
103
+ We conduct similar experiments as those in Fig. 2 on small-scale downstream datasets. The results are shown in Fig. 3. We observe consistent performance improvement as the number of reserved pre-trained blocks increases. And the smaller the dataset scale, the more the performance benefits from the higher layers. It demonstrates that higher layers are still valuable and matter in data-insufficient downstream tasks. Furthermore, we observe comparable transfer performance for MAE-Tiny and MoCov3-Tiny when only a certain number of lower layers is reserved, while MoCov3-Tiny pulls ahead when higher layers are further reserved. This indicates that the higher layers of MoCov3-Tiny work better than those of MAE-Tiny on data-insufficient downstream tasks, which is also consistent with our CKA-based analyses in Fig. 1: MoCov3-Tiny learns more recognition-relevant semantics at an abstract level in its higher layers (high similarity to the reference recognition models) than MAE-Tiny.
104
+
105
+ # 4.2. Attention Map Analyses
106
+
107
+ The attention maps reveal the behaviors for aggregating information in the attention mechanism of ViTs, and are computed from the compatibility of queries and keys by the dot-product operation. We introduce two metrics for further analyses of the pre-trained models, i.e., attention distance and attention entropy. The attention distance for the $j$ -th token of the $h$ -th head is calculated as:
108
+
109
+ $$
110
+ \boldsymbol{D}_{h,j} = \sum_{i} \operatorname{softmax}\left(\boldsymbol{A}_{h}\right)_{i,j} \boldsymbol{G}_{i,j}, \tag{1}
111
+ $$
112
+
113
+ where $\mathbf{A}_h \in \mathbb{R}^{l \times l}$ is the attention map for the $h$ -th attention head, $\mathbf{G}_{i,j}$ is the Euclidean distance between the spatial locations of the $i$ -th and $j$ -th tokens, and $l$ is the number of tokens.
114
+
115
+ ![](images/2950b17be003ba6ad5c566596a5b2a9217d533adbc4e203d096086abbcf72c80.jpg)
116
+ Figure 1. Layer representation similarity within and across models as heatmaps, with x and y axes indexing the layers (the 0 index indicates the patch embedding layer), and higher values indicate higher similarity.
117
+
118
+ ![](images/0fdc7a4ac9c0fddab0cc8ef6690edc5cc72964ddcd5c4a8cab6513a5c29c1124.jpg)
119
+
120
+ ![](images/27dba6c0a698d625c93a31827ae990e1d9b7e240ce7129ba619778e4562abf89.jpg)
121
+ Figure 2. Lower layers of pre-trained models contribute most of the gains on the downstream ImageNet dataset.
122
+
123
+ ![](images/a8613789e630fefa65b586d2dd53ca2fc7bdf3d7522e4e3c0ad815454acac8a8.jpg)
124
+
125
+ ![](images/65510b498963f4597c353152666980d895fe5c4ea2620973dd0c28f37b682733.jpg)
126
+ Figure 3. The contribution to performance gain from higher layers of pre-trained models increases as the downstream dataset scale shrinks, which indicates that higher layers matter in data-insufficient downstream tasks.
127
+
128
+ ![](images/800972bc023cc520195ded29c828f2f7d1b618bad6640ed8bdbd07341ba21a92.jpg)
129
+
130
+ ![](images/39b7b5142e37ca726ace601fadb1b531798575e1dbdf0662e63de8479d496b91.jpg)
131
+
132
+ The attention entropy is calculated as:
133
+
134
+ $$
135
+ \boldsymbol{E}_{h,j} = -\sum_{i} \operatorname{softmax}\left(\boldsymbol{A}_{h}\right)_{i,j} \log\left(\operatorname{softmax}\left(\boldsymbol{A}_{h}\right)_{i,j}\right). \tag{2}
136
+ $$
137
+
138
+ Specifically, the attention distance reveals how much local vs. global information is aggregated, and a lower distance indicates that each token focuses more on neighbor tokens. The attention entropy reveals the concentration of the attention distribution, and lower entropy indicates that each token attends to fewer tokens. We analyze the distributions of the average attention distance and entropy across all the tokens in different attention heads, as shown in Fig. 4.
139
+
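+ Both metrics can be computed per layer roughly as follows (a minimal sketch assuming a plain $14 \times 14$ token grid without the class token; shapes and conventions are illustrative):
+
+ ```python
+ import numpy as np
+
+ def attention_metrics(A, grid=14):
+     """A: attention logits of one layer, shape (heads, l, l).
+     Returns per-head mean attention distance and entropy (Eqs. 1-2)."""
+     h, l, _ = A.shape
+     P = np.exp(A - A.max(axis=-1, keepdims=True))   # softmax over keys
+     P /= P.sum(axis=-1, keepdims=True)
+
+     # G[i, j]: Euclidean distance between tokens i and j on the grid
+     ys, xs = np.divmod(np.arange(l), grid)
+     G = np.hypot(ys[:, None] - ys[None, :], xs[:, None] - xs[None, :])
+
+     dist = (P * G[None]).sum(axis=-1).mean(axis=-1)            # mean D_h
+     ent = -(P * np.log(P + 1e-12)).sum(axis=-1).mean(axis=-1)  # mean E_h
+     return dist, ent
+
+ d, e = attention_metrics(np.random.randn(12, 196, 196))
+ print(d.shape, e.shape)  # (12,), (12,): one value per attention head
+ ```
+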
140
+ The pre-training with MAE makes the attention of the downstream models more local and concentrated. First, we compare MAE-Tiny-FT with DeiT-Tiny. The former adopts MAE-Tiny as initialization and is then fine-tuned on IN1K, and the latter is supervisedly trained from scratch (Random Init.) on IN1K. As shown in Fig. 4, we observe very similar attention behaviors between them, except that the attention of MAE-Tiny-FT (the purple box-whisker) is more local (with lower attention distance) and concentrated (with lower attention entropy) in the middle layers compared with DeiT-Tiny (the red box-whisker). We attribute this to the introduction of MAE-Tiny as pre-training (the orange box-whisker), which has lower attention distance and entropy, and may bring a locality inductive bias compared with random initialization (the blue box-whisker). It is noteworthy that the locality inductive bias does not mean that tokens in all attention heads attend to only a few nearby tokens.
141
+
142
+ The attention distance and entropy for different heads are still distributed over a wide range (except for the last several layers), which indicates that the heads have diverse specializations, making the models aggregate both local and global tokens with both concentrated and broad focuses.
143
+
144
+ Then, we focus on the comparison between MAE-Tiny and MoCov3-Tiny, trying to explain their divergent downstream performances observed in Sec. 3. As shown in Fig. 4, we observe that MoCov3-Tiny (the green box-whisker) generally has more global and broad attention than MAE-Tiny (the orange box-whisker); even its several leading blocks have a narrower range of attention distance and entropy than those of MAE-Tiny. We think this characteristic of MoCov3-Tiny makes downstream fine-tuning with it as initialization take "shortcuts", i.e., directly paying attention to global features and overlooking local patterns, which may be unfavorable for fine-grained recognition. This leads to inferior downstream performance on ImageNet, but fair performance on Flowers, CIFAR100, etc., for which the "shortcuts" may be adequate. As for MAE-Tiny, its distinct behaviors in higher layers, with rather low attention distance and entropy, may make it hard to transfer to data-insufficient downstream tasks, thus resulting in inferior performance on these tasks.
145
+
146
+ # 5. Distillation Improves Pre-Trained Models
147
+
148
+ In the previous section, we conjectured that it is hard for MAE to learn good representations relevant to recognition in higher layers, which results in unsatisfactory performance on data-insufficient downstream tasks.
149
+
150
+ ![](images/13bdb0208c9ea74e3475d866a9095e3b93c41796f3d52064c8f66d63d4a1e160.jpg)
151
+
152
+ ![](images/dfec2259ce7acbfd7cd4a5b1c18794168713d6f60828b461dd595e66a5617812.jpg)
153
+
154
+ ![](images/407337fdac4b9437d1fd8214b557adb9e5257c0d28a0e4255e74a2d680d1e630.jpg)
155
+
156
+ ![](images/fdd673a2792e95f55d9c0e283c1f8618b8a4095292cac03ed69dfbc34b03c6fc.jpg)
157
+ Figure 4. Attention distance and entropy analyses. We visualize the distributions of the average attention distance and entropy across all tokens in different attention heads w.r.t. the layer number with box-whisker plots.
158
+
159
+ ![](images/6d01caa1173f9f2909d10c0357e21098f33adda3b346822bc6fb5a3c543db2c7.jpg)
160
+ Figure 5. Distillation compresses the good representation of the teacher (MAE-Base) to the student (D-MAE-Tiny).
161
+
162
+ ![](images/466c6f07492cd27e52c243cdd6dbb070a257720a80a6441fb4b82941af6c0560.jpg)
163
+
164
+ ![](images/50542144dc423f8e2d36a8b8fb8f7543ee483f05a1d33558bc1e041b2c1ab336.jpg)
165
+ Figure 6. Distillation on attention maps of higher layers improves performance most.
166
+
167
+ A natural question is whether it can gain more semantic information by scaling up the models. We further examine a large pre-trained model, MAE-Base (He et al., 2021), and find it achieves a better alignment with the reference model, as shown in the top subfigure of Fig. 5. This indicates that it is possible to extract features relevant to recognition in higher layers for the scaled-up encoder in MAE pre-training. These observations motivate us to compress the knowledge of large pre-trained models into tiny ones with knowledge distillation under the MIM framework.
168
+
169
+ Distillation methods. Specifically, a pre-trained MAE-Base (He et al., 2021) is introduced as the teacher network. The distillation loss is constructed based on the similarity between the attention maps of the corresponding teacher's and student's layers. It is formulated as:
172
+
173
+ $$
174
+ L_{\mathrm{attn}} = \operatorname{MSE}\left(\boldsymbol{A}^{T}, \boldsymbol{M}\boldsymbol{A}^{S}\right), \tag{3}
175
+ $$
176
+
177
+ where $\mathbf{A}^T\in \mathbb{R}^{h\times l\times l}$ and $\mathbf{A}^S\in \mathbb{R}^{h'\times l\times l}$ refer to the attention maps of the corresponding teacher's and student's layers, with $h$ and $h^\prime$ attention heads respectively. $l$ is the number of tokens. A learnable mapping matrix $M\in \mathbb{R}^{h\times h'}$ is introduced to align the number of heads. MSE denotes mean squared error.
178
+
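+ A minimal PyTorch sketch of this loss (module and tensor names are ours, not from the released code):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class AttnDistillLoss(nn.Module):
+     """MSE between teacher attention maps and head-mapped student maps (Eq. 3)."""
+     def __init__(self, teacher_heads, student_heads):
+         super().__init__()
+         # learnable M in R^{h x h'}, mixing student heads into teacher heads
+         self.M = nn.Parameter(torch.eye(teacher_heads, student_heads))
+
+     def forward(self, attn_t, attn_s):
+         # attn_t: (batch, h, l, l); attn_s: (batch, h', l, l)
+         mixed = torch.einsum('ij,bjkl->bikl', self.M, attn_s)
+         return nn.functional.mse_loss(attn_t, mixed)
+
+ loss_fn = AttnDistillLoss(teacher_heads=12, student_heads=12)
+ attn_t = torch.rand(2, 12, 197, 197).softmax(dim=-1)
+ attn_s = torch.rand(2, 12, 197, 197).softmax(dim=-1)
+ print(loss_fn(attn_t, attn_s).item())
+ ```
+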
179
+ During the pre-training, the teacher processes the same un-masked image patches as the student encoder. The parameters of the student network are updated based on the joint backward gradients from the distillation loss and the original MAE's reconstruction loss, while the teacher's parameters remain frozen throughout the pre-training process.
180
+
181
+ Distill on lower or higher layers? We first examine which pair of teacher and student layers the above layer-wise distillation should be applied to for the largest performance gain. We conduct experiments by constructing the above attention-based distillation loss between pairs of layers at $1/4$ , $2/4$ , $3/4$ , or $4/4$ depth of the teacher and student respectively, i.e., the 3rd, 6th, 9th, or 12th layer for both the teacher (MAE-Base) and the student (MAE-Tiny). As shown in Fig. 6, distilling on the attention maps of the last transformer blocks promotes the performance most, surpassing distilling on lower layers (for the sake of simplicity, we only fine-tune the pre-trained models on IN1K for 100 epochs). This is consistent with the analyses in Sec. 4.
182
+
183
+ Table 5. Distillation improves downstream performance on classification tasks and object detection and segmentation tasks. Top-1 accuracy is reported for classification tasks and AP is reported for object detection (det.) and instance segmentation (seg.) tasks.
184
+
185
+ <table><tr><td>Datasets Init.</td><td>Flowers</td><td>Pets</td><td>Aircraft</td><td>Cars</td><td>CIFAR100</td><td>iNat18</td><td>ImageNet</td><td>COCO(det.)</td><td>COCO(seg.)</td></tr><tr><td>supervised DeiT-Tiny</td><td>96.4</td><td>93.1</td><td>73.5</td><td>85.6</td><td>85.8</td><td>63.6</td><td>-</td><td>40.4</td><td>35.5</td></tr><tr><td>self-supervised MAE-Tiny</td><td>85.8</td><td>76.5</td><td>64.6</td><td>78.8</td><td>78.9</td><td>60.6</td><td>78.0</td><td>39.9</td><td>35.4</td></tr><tr><td>D-MAE-Tiny</td><td>95.2 (+9.4)</td><td>89.1 (+12.6)</td><td>79.2 (+14.6)</td><td>87.5 (+8.7)</td><td>85.0 (+6.1)</td><td>63.6 (+3.0)</td><td>78.4 (+0.4)</td><td>42.3 (+2.4)</td><td>37.4 (+2.0)</td></tr></table>
186
+
187
+ Specifically, the lower layers learn good representations themselves during the pre-training with MAE, and thus distilling on these layers contributes only marginal improvement, while the higher layers rely on a good teacher to guide them to capture rich semantic features.
188
+
189
+ Distillation improves downstream performance. We further evaluate the distilled pre-trained model on several downstream tasks. For simplicity, we only apply distillation on the last layers. The resulting model is denoted as D-MAE-Tiny. The visualization result at the bottom of Fig. 5 shows that the teacher's recognition-relevant representation is compressed into the student. In particular, the quality of the higher layers is improved. The distillation contributes to better downstream performance as shown in Tab. 5, especially on data-insufficient classification tasks and dense prediction tasks. In Appendix C.3, we also show that our distillation technique can help other ViT students beyond ViT-Tiny to achieve better downstream performance.
190
+
191
+ # 6. Related Works
192
+
193
+ Self-supervised learning (SSL) focuses on different pretext tasks (Gidaris et al., 2018; Zhang et al., 2016; Noroozi & Favaro, 2016; Dosovitskiy et al., 2014) for pre-training without using manually labeled data. Among them, contrastive learning (CL) has been popular and shows promising results on various convolutional networks (ConvNets) (He et al., 2020; Chen et al., 2020; Grill et al., 2020; Caron et al., 2020) and ViTs (Chen et al., 2021a; Caron et al., 2021). Recently, methods based on masked image modeling (MIM) achieve state-of-the-art results on ViTs (He et al., 2021; Bao et al., 2021; Zhou et al., 2022). It has been demonstrated that these methods can scale up well to larger models, while their performance on lightweight ViTs is seldom investigated.
194
+
195
+ Vision Transformers (ViTs) (Dosovitskiy et al., 2020) apply a Transformer architecture (a stack of attention modules (Vaswani et al., 2017)) to image patches and show very competitive results in various visual tasks (Touvron et al., 2021a; Liu et al., 2021; Li et al., 2022). The performance of ViTs has been largely improved thanks to better training recipes (Touvron et al., 2021a; Steiner et al., 2021; Touvron et al., 2022). As for lightweight ViTs, most works focus on integrating ViTs and ConvNets (Graham et al., 2021; Heo et al., 2021; Mehta & Rastegari, 2022; Chen et al., 2021b), while few works focus on how to optimize the networks.
198
+
199
+ Knowledge Distillation is a mainstream approach for model compression (Bucilua et al., 2006), in which a large teacher network is trained first and then a more compact student network is optimized to approximate the teacher (Hinton et al., 2015; Romero et al., 2014). Touvron et al. (2021a) achieve better accuracy on ViTs by adopting a ConvNet as the teacher. With regard to the compression of pre-trained networks, some works (Sanh et al., 2019; Jiao et al., 2020; Wang et al., 2021; Sun et al., 2020) aim to distill large-scale pre-trained language models. In the area of computer vision, a series of works (Fang et al., 2020; Abbasi Koohpayegani et al., 2020; Choi et al., 2021) focus on transferring knowledge of large CL-based pre-trained networks to lightweight ConvNets. Thus far, few works have focused on improving the quality of lightweight MIM-based pre-trained ViTs by distillation.
200
+
201
+ # 7. Discussions
202
+
203
+ Limitations. Our study is restricted to classification tasks and some dense-prediction tasks. We leave the exploration of more tasks for further work.
204
+
205
+ Conclusions. We investigate the self-supervised pre-training of lightweight ViTs and demonstrate the usefulness of an advanced lightweight ViT pre-training strategy in improving downstream performance, even to a level comparable to the most delicately designed SOTA networks on ImageNet. Some properties of such pre-training are revealed, e.g., these methods fail to benefit from large-scale pre-training data and show more dependency on the downstream dataset scale. We also present some insights on what matters for downstream performance. These may indicate potential future directions for improving pre-training on lightweight models, the value of which has also been demonstrated as they guide the design of our proposed distillation strategy and help to achieve much better downstream performance. We expect our research may provide useful experience and advance the study of self-supervised learning on lightweight ViTs.
206
+
207
+ Acknowledgment. The authors would like to thank the anonymous reviewers for their valuable comments and suggestions. This work was supported in part by the National Key R&D Program of China (Grant No. 2020AAA0105802, 2020AAA0105800), the Natural Science Foundation of China (Grant No. U22B2056, 61972394, U2033210, 62036011, 62192782, 61721004, 62172413), the Beijing Natural Science Foundation (Grant No. L223003, JQ22014), the Major Projects of Guangdong Education Department for Foundation Research and Applied Research (Grant No. 2017KZDXM081, 2018KZDXM066), the Guangdong Provincial University Innovation Team Project (Grant No. 2020KCXTD045), the Zhejiang Provincial Natural Science Foundation (Grant No. LDT23F02024F02). Jin Gao was also supported in part by the Youth Innovation Promotion Association, CAS.
208
+
209
+ # References
210
+
211
+ Abbasi Koohpayegani, S., Tejankar, A., and Pirsiavash, H. Compress: Self-supervised learning by compressing representations. Adv. Neural Inform. Process. Syst., 33:12980-12992, 2020.
212
+ Ali, A., Touvron, H., Caron, M., Bojanowski, P., Douze, M., Joulin, A., Laptev, I., Neverova, N., Synnaeve, G., Verbeek, J., et al. Xcit: Cross-covariance image transformers. Adv. Neural Inform. Process. Syst., 34, 2021.
213
+ Assran, M., Caron, M., Misra, I., Bojanowski, P., Joulin, A., Ballas, N., and Rabbat, M. Semi-supervised learning of visual features by non-parametrically predicting view assignments with support samples. In Int. Conf. Comput. Vis., pp. 8443-8452, 2021.
214
+ Ba, J. L., Kiros, J. R., and Hinton, G. E. Layer normalization. ArXiv, abs/1607.06450, 2016.
215
+ Bao, H., Dong, L., and Wei, F. Beit: Bert pre-training of image transformers. ArXiv, abs/2106.08254, 2021.
216
+ Bucilua, C., Caruana, R., and Niculescu-Mizil, A. Model compression. In ACM Int. Conf. on Knowledge Discovery and Data Mining, pp. 535-541, 2006.
217
+ Caron, M., Misra, I., Mairal, J., Goyal, P., Bojanowski, P., and Joulin, A. Unsupervised learning of visual features by contrasting cluster assignments. In Adv. Neural Inform. Process. Syst., 2020.
218
+ Caron, M., Touvron, H., Misra, I., Jégou, H., Mairal, J., Bojanowski, P., and Joulin, A. Emerging properties in self-supervised vision transformers. In Int. Conf. Comput. Vis., pp. 9650-9660, 2021.
219
+ Chen, X., Fan, H., Girshick, R., and He, K. Improved baselines with momentum contrastive learning. *ArXiv*, abs/2003.04297, 2020.
220
+
221
+ Chen, X., Xie, S., and He, K. An empirical study of training self-supervised vision transformers. In Int. Conf. Comput. Vis., pp. 9640-9649, 2021a.
222
+ Chen, Y., Dai, X., Chen, D., Liu, M., Dong, X., Yuan, L., and Liu, Z. Mobile-former: Bridging mobilenet and transformer. ArXiv, abs/2108.05895, 2021b.
223
+ Cho, J. H. and Hariharan, B. On the efficacy of knowledge distillation. In Int. Conf. Comput. Vis., pp. 4794-4802, 2019.
224
+ Choi, H. M., Kang, H., and Oh, D. Unsupervised representation transfer for small networks: I believe i can distill on-the-fly. In Adv. Neural Inform. Process. Syst., 2021.
225
+ Cortes, C., Mohri, M., and Rostamizadeh, A. Algorithms for learning kernels based on centered alignment. The Journal of Machine Learning Research, 13:795-828, 2012.
226
+ Cubuk, E. D., Zoph, B., Shlens, J., and Le, Q. Randaugment: Practical automated data augmentation with a reduced search space. In Adv. Neural Inform. Process. Syst., volume 33, pp. 18613-18624, 2020.
227
+ Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 248-255, 2009.
228
+ Dosovitskiy, A., Springenberg, J. T., Riedmiller, M., and Brox, T. Discriminative unsupervised feature learning with convolutional neural networks. Adv. Neural Inform. Process. Syst., 27:766-774, 2014.
229
+ Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. An image is worth 16x16 words: Transformers for image recognition at scale. In Int. Conf. Learn. Represent., 2020.
230
+ Fang, Z., Wang, J., Wang, L., Zhang, L., Yang, Y., and Liu, Z. Seed: Self-supervised distillation for visual representation. In Int. Conf. Learn. Represent., 2020.
231
+ Gidaris, S., Singh, P., and Komodakis, N. Unsupervised representation learning by predicting image rotations. In Int. Conf. Learn. Represent., 2018.
232
+ Goyal, P., Dollár, P., Girshick, R., Noordhuis, P., Wesolowski, L., Kyrola, A., Tulloch, A., Jia, Y., and He, K. Accurate, large minibatch sgd: Training imagenet in 1 hour. ArXiv, abs/1706.02677, 2017.
233
+ Graham, B., El-Nouby, A., Touvron, H., Stock, P., Joulin, A., Jégou, H., and Douze, M. Levit: A vision transformer in convnet's clothing for faster inference. In Int. Conf. Comput. Vis., pp. 12259-12269, 2021.
234
+
235
+ Grill, J.-B., Strub, F., Altché, F., Tallec, C., Richemond, P., Buchatskaya, E., Doersch, C., Pires, B., Guo, Z., Azar, M., et al. Bootstrap your own latent: A new approach to self-supervised learning. In Adv. Neural Inform. Process. Syst., 2020.
236
+ He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 770-778, 2016.
237
+ He, K., Fan, H., Wu, Y., Xie, S., and Girshick, R. Momentum contrast for unsupervised visual representation learning. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 9729-9738, 2020.
238
+ He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. ArXiv, abs/2111.06377, 2021.
239
+ Heo, B., Yun, S., Han, D., Chun, S., Choe, J., and Oh, S. J. Rethinking spatial dimensions of vision transformers. In Int. Conf. Comput. Vis., pp. 11936-11945, 2021.
240
+ Hinton, G., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network. ArXiv, abs/1503.02531, 2015.
241
+ Howard, A., Sandler, M., Chu, G., Chen, L.-C., Chen, B., Tan, M., Wang, W., Zhu, Y., Pang, R., Vasudevan, V., Le, Q. V., and Adam, H. Searching for mobilenetv3. In Int. Conf. Comput. Vis., 2019.
242
+ Huang, G., Sun, Y., Liu, Z., Sedra, D., and Weinberger, K. Q. Deep networks with stochastic depth. In Eur. Conf. Comput. Vis., pp. 646-661, 2016.
243
+ Jiao, X., Yin, Y., Shang, L., Jiang, X., Chen, X., Li, L., Wang, F., and Liu, Q. Tinybert: Distilling bert for natural language understanding. In Findings of Empirical Methods in Natural Language Process., pp. 4163-4174, 2020.
244
+ Jin, X., Peng, B., Wu, Y., Liu, Y., Liu, J., Liang, D., Yan, J., and Hu, X. Knowledge distillation via route constrained optimization. In Int. Conf. Comput. Vis., pp. 1345-1354, 2019.
245
+ Krause, J., Stark, M., Deng, J., and Fei-Fei, L. 3d object representations for fine-grained categorization. In Int. Conf. Comput. Vis. Worksh., pp. 554-561, 2013.
246
+ Krizhevsky, A. et al. Learning multiple layers of features from tiny images. Technical Report, 2009.
247
+ Li, Y., Xie, S., Chen, X., Dollar, P., He, K., and Girshick, R. Benchmarking detection transfer learning with vision transformers. ArXiv, abs/2111.11429, 2021.
248
+
249
+ Li, Y., Mao, H., Girshick, R., and He, K. Exploring plain vision transformer backbones for object detection. ArXiv, abs/2203.16527, 2022.
250
+ Lin, T.-Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollar, P., and Zitnick, C. L. Microsoft coco: Common objects in context. In Eur. Conf. Comput. Vis., pp. 740-755, 2014.
251
+ Liu, Z., Miao, Z., Zhan, X., Wang, J., Gong, B., and Yu, S. X. Large-scale long-tailed recognition in an open world. In IEEE Conf. Comput. Vis. Pattern Recog., 2019.
252
+ Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In Int. Conf. Comput. Vis., pp. 10012-10022, 2021.
253
+ Liu, Z., Mao, H., Wu, C.-Y., Feichtenhofer, C., Darrell, T., and Xie, S. A convnet for the 2020s. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 11966-11976, 2022.
254
+ Loshchilov, I. and Hutter, F. Sgdr: Stochastic gradient descent with warm restarts. ArXiv, abs/1608.03983, 2016.
255
+ Maji, S., Rahtu, E., Kannala, J., Blaschko, M., and Vedaldi, A. Fine-grained visual classification of aircraft. ArXiv, abs/1306.5151, 2013.
256
+ Mehta, S. and Rastegari, M. Mobilevit: Light-weight, general-purpose, and mobile-friendly vision transformer. In Int. Conf. Learn. Represent., 2022.
257
+ Mirzadeh, S. I., Farajtabar, M., Li, A., Levine, N., Matsukawa, A., and Ghasemzadeh, H. Improved knowledge distillation via teacher assistant. In AAAI Conf. on Artificial Intelligence, volume 34, pp. 5191-5198, 2020.
258
+ Newell, A. and Deng, J. How useful is self-supervised pretraining for visual tasks? In IEEE Conf. Comput. Vis. Pattern Recog., pp. 7345-7354, 2020.
259
+ Nguyen, T., Raghu, M., and Kornblith, S. Do wide and deep networks learn the same things? uncovering how neural network representations vary with width and depth. In Int. Conf. Learn. Represent., 2020.
260
+ Nilsback, M.-E. and Zisserman, A. Automated flower classification over a large number of classes. In Indian Conference on Computer Vision, Graphics & Image Processing, pp. 722-729, 2008.
261
+ Noroozi, M. and Favaro, P. Unsupervised learning of visual representations by solving jigsaw puzzles. In Eur. Conf. Comput. Vis., pp. 69-84, 2016.
262
+ Pan, J., Bulat, A., Tan, F., Zhu, X., Dudziak, L., Li, H., Tzimiropoulos, G., and Martinez, B. Edgevits: Competing light-weight cnns on mobile devices with vision transformers. ArXiv, abs/2205.03436, 2022.
263
+
264
+ Parkhi, O. M., Vedaldi, A., Zisserman, A., and Jawahar, C. V. Cats and dogs. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 3498-3505, 2012.
265
+ Raghu, M., Unterthiner, T., Kornblith, S., Zhang, C., and Dosovitskiy, A. Do vision transformers see like convolutional neural networks? Adv. Neural Inform. Process. Syst., 34, 2021.
266
+ Ridnik, T., Ben-Baruch, E., Noy, A., and Zelnik-Manor, L. Imagenet-21k pretraining for the masses. ArXiv, abs/2104.10972, 2021.
267
+ Romero, A., Ballas, N., Kahou, S. E., Chassang, A., Gatta, C., and Bengio, Y. Fitnets: Hints for thin deep nets. ArXiv, abs/1412.6550, 2014.
268
+ Sandler, M., Howard, A., Zhu, M., Zhmoginov, A., and Chen, L.-C. Mobilenetv2: Inverted residuals and linear bottlenecks. In IEEE Conf. Comput. Vis. Pattern Recog., pp. 4510-4520, 2018.
269
+ Sanh, V., Debut, L., Chaumond, J., and Wolf, T. Distilbert, a distilled version of bert: smaller, faster, cheaper and lighter. ArXiv, abs/1910.01108, 2019.
270
+ Song, L., Smola, A., Gretton, A., Bedo, J., and Borgwardt, K. Feature selection via dependence maximization. The Journal of Machine Learning Research, 13(5), 2012.
271
+ Steiner, A., Kolesnikov, A., Zhai, X., Wightman, R., Uszkoreit, J., and Beyer, L. How to train your vit? data, augmentation, and regularization in vision transformers. ArXiv, abs/2106.10270, 2021.
272
+ Sun, Z., Yu, H., Song, X., Liu, R., Yang, Y., and Zhou, D. Mobilebert: a compact task-agnostic bert for resource-limited devices. In Association for Computational Linguistics, pp. 2158-2170, 2020.
273
+ Tan, M. and Le, Q. Efficientnet: Rethinking model scaling for convolutional neural networks. In Int. Conf. Machine Learning., pp. 6105-6114, 2019.
274
+ Touvron, H., Cord, M., Douze, M., Massa, F., Sablayrolles, A., and Jegou, H. Training data-efficient image transformers & distillation through attention. In Int. Conf. Machine Learning., volume 139, pp. 10347-10357, 2021a.
275
+ Touvron, H., Cord, M., Sablayrolles, A., Synnaeve, G., and Jégou, H. Going deeper with image transformers. In Int. Conf. Comput. Vis., pp. 32-42, 2021b.
276
+ Touvron, H., Cord, M., and Jégou, H. Deit iii: Revenge of the vit. ArXiv, abs/2204.07118, 2022.
277
+ Van Horn, G., Mac Aodha, O., Song, Y., Cui, Y., Sun, C., Shepard, A., Adam, H., Perona, P., and Belongie, S. The inaturalist species classification and detection dataset. In IEEE Conf. Comput. Vis. Pattern Recog., 2018.
278
+
279
+ Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention is all you need. Adv. Neural Inform. Process. Syst., 30, 2017.
280
+ Wang, W., Bao, H., Huang, S., Dong, L., and Wei, F. Minilmv2: Multi-head self-attention relation distillation for compressing pretrained transformers. In Findings of Int. Joint Conf. on Natural Language Process., pp. 2140-2151, 2021.
281
+ Wightman, R. Pytorch image models. https://github.com/rwightman/pytorch-image-models, 2019.
282
+ Wightman, R., Touvron, H., and Jégou, H. Resnet strikes back: An improved training procedure in timm. ArXiv, abs/2110.00476, 2021.
283
+ Woo, S., Debnath, S., Hu, R., Chen, X., Liu, Z., Kweon, I.-S., and Xie, S. Convnext v2: Co-designing and scaling convnets with masked autoencoders. ArXiv, abs/2301.00808, 2023.
284
+ Xie, Z., Zhang, Z., Cao, Y., Lin, Y., Bao, J., Yao, Z., Dai, Q., and Hu, H. Simmim: A simple framework for masked image modeling. In IEEE Conf. Comput. Vis. Pattern Recog., 2022.
285
+ Yun, S., Han, D., Oh, S. J., Chun, S., Choe, J., and Yoo, Y. Cutmix: Regularization strategy to train strong classifiers with localizable features. In Int. Conf. Comput. Vis., pp. 6023-6032, 2019.
286
+ Zhang, H., Cisse, M., Dauphin, Y. N., and Lopez-Paz, D. mixup: Beyond empirical risk minimization. In Int. Conf. Learn. Represent., 2018.
287
+ Zhang, R., Isola, P., and Efros, A. A. Colorful image colorization. In Eur. Conf. Comput. Vis., pp. 649-666, 2016.
288
+ Zhou, J., Wei, C., Wang, H., Shen, W., Xie, C., Yuille, A., and Kong, T. ibot: Image bert pre-training with online tokenizer. Int. Conf. Learn. Represent., 2022.
289
+
290
+ # A. Experimental Details
291
+
292
+ # A.1. Evaluation Details for MAE and MoCo-v3 on ImageNet
293
+
294
+ We follow the common practice of supervised ViT training (Touvron et al., 2021a) for fine-tuning evaluation, except for some augmentation hyper-parameters. The default setting is in Tab. A1. We use the linear $lr$ scaling rule (Goyal et al., 2017): $lr = \mathit{base\_lr} \times \mathit{batchsize} / 256$. We use layer-wise $lr$ decay following (Bao et al., 2021; He et al., 2021), and the decay rate is tuned separately for MAE and MoCo-v3.
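+ 
+ For concreteness, below is a minimal sketch of how the linear scaling rule and layer-wise $lr$ decay can be combined when building optimizer parameter groups. The attribute names (`patch_embed`, `blocks`, `head`) follow the timm-style ViT convention and are assumptions for illustration, not a prescription of the exact implementation.
+ 
+ ```python
+ def build_param_groups(model, base_lr=1e-3, batch_size=1024, layer_decay=0.85):
+     """Sketch: linear lr scaling plus layer-wise lr decay for a ViT.
+     layer_decay would be 0.85 for MAE and 0.75 for MoCo-v3 (Tab. A1)."""
+     lr = base_lr * batch_size / 256  # linear lr scaling rule
+     num_layers = len(model.blocks)
+     # the patch projection gets the strongest decay ...
+     groups = [{"params": list(model.patch_embed.parameters()),
+                "lr": lr * layer_decay ** (num_layers + 1)}]
+     for i, block in enumerate(model.blocks):
+         # ... and deeper blocks get a lr closer to the full value
+         groups.append({"params": list(block.parameters()),
+                        "lr": lr * layer_decay ** (num_layers - i)})
+     groups.append({"params": list(model.head.parameters()), "lr": lr})
+     return groups
+ 
+ # usage: optimizer = torch.optim.AdamW(build_param_groups(model), weight_decay=0.05)
+ ```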
295
+
296
+ Besides, we use global average pooling (GAP) after the final block when fine-tuning both the MAE and MoCo-v3 pre-trained models, which is, however, not the common practice for MoCo-v3 (Chen et al., 2021a). We adopt it because it helps significantly, surpassing the original class-token-based configuration ($76.8\%$ vs. $73.7\%$ top-1 accuracy) for the lightweight ViT-Tiny.
297
+
298
+ Table A1. Fine-tuning evaluation settings.
299
+
300
+ <table><tr><td>config</td><td>value</td></tr><tr><td>optimizer</td><td>AdamW</td></tr><tr><td>base learning rate</td><td>1e-3</td></tr><tr><td>weight decay</td><td>0.05</td></tr><tr><td>optimizer momentum</td><td>β1, β2 = 0.9, 0.999</td></tr><tr><td>layer-wise lr decay (Bao et al., 2021)</td><td>0.85 (MAE), 0.75 (MoCo-v3)</td></tr><tr><td>batch size</td><td>1024</td></tr><tr><td>learning rate schedule</td><td>cosine decay (Loshchilov &amp; Hutter, 2016)</td></tr><tr><td>warmup epochs</td><td>5</td></tr><tr><td>training epochs</td><td>{100, 300, 1000}</td></tr><tr><td>augmentation</td><td>RandAug(10, 0.5) (Cubuk et al., 2020)</td></tr><tr><td>colorjitter</td><td>0.3</td></tr><tr><td>label smoothing</td><td>0</td></tr><tr><td>mixup (Zhang et al., 2018)</td><td>0.2</td></tr><tr><td>cutmix (Yun et al., 2019)</td><td>0</td></tr><tr><td>drop path (Huang et al., 2016)</td><td>0</td></tr></table>
301
+
302
+ Table A2. Pre-training setting for MoCo-v3.
303
+
304
+ <table><tr><td>config</td><td>value</td></tr><tr><td>optimizer</td><td>AdamW</td></tr><tr><td>base learning rate</td><td>1.5e-4</td></tr><tr><td>weight decay</td><td>0.1</td></tr><tr><td>optimizer momentum</td><td>β1, β2 = 0.9, 0.999</td></tr><tr><td>batch size</td><td>1024</td></tr><tr><td>learning rate schedule</td><td>cosine decay</td></tr><tr><td>warmup epochs</td><td>40</td></tr><tr><td>training epochs</td><td>400</td></tr><tr><td>momentum coefficient</td><td>0.99</td></tr><tr><td>temperature</td><td>0.2</td></tr></table>
305
+
306
+ # A.2. Pre-Training Details of MAE
307
+
308
+ Our experimental setup for MAE largely follows He et al. (2021), including the optimizer, learning rate, batch size, augmentation, etc., but several basic factors and components are adjusted to fit the smaller encoder. We find that MAE prefers a much more lightweight decoder when the encoder is small; thus, a decoder with only one Transformer block and a width of 192 is adopted by default. We sweep over 5 masking ratios $\{0.45, 0.55, 0.65, 0.75, 0.85\}$ and find that 0.75 achieves the best performance.
309
+
310
+ # A.3. Pre-Training Details of MoCo-v3
311
+
312
+ We reimplement MoCo-v3 (Chen et al., 2021a) with ViT-Tiny as encoder and largely follow the original setups. The default setting is in Tab. A2.
313
+
314
+ Chen et al. (2021a) observe that instability is a major issue impacting self-supervised ViT training, causing mild degradation in accuracy, and propose a simple trick, a fixed random patch projection (the first layer of a ViT model), to improve stability in practice. However, we find that stability is not the main issue for small networks: higher performance is achieved with a learned patch projection layer. Thus, this technique is not used by default.
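+ 
+ For reference, the fixed random patch projection trick amounts to freezing the first layer after its random initialization; a minimal sketch assuming a timm-style `patch_embed` module is given below (again, we keep this layer learnable by default):
+ 
+ ```python
+ def freeze_patch_projection(model):
+     """Fixed random patch projection trick from Chen et al. (2021a); not
+     used by default here, since a learned projection works better for ViT-Tiny."""
+     for p in model.patch_embed.parameters():
+         p.requires_grad = False  # keep the random initialization frozen
+ ```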
315
+
316
+ # A.4. Transfer Evaluation Details on Classification Tasks
317
+
318
+ We evaluate several pre-trained models with transfer learning in order to measure their generalization ability. We use 6 popular vision datasets: Flowers-102 (Flowers for short) (Nilsback & Zisserman, 2008), Oxford-IIIT Pets (Pets) (Parkhi et al., 2012), FGVC-Aircraft (Aircraft) (Maji et al., 2013), Stanford Cars (Cars) (Krause et al., 2013), CIFAR100 (Krizhevsky et al., 2009), and iNaturalist 2018 (iNat18) (Van Horn et al., 2018). For all these datasets except iNat18, we fine-tune with SGD (momentum=0.9) and a batch size of 512. The learning rates are swept over 3 candidates and the training epochs over 2 candidates per dataset, as detailed in Tab. A3. We adopt a cosine decay learning rate schedule (Loshchilov & Hutter, 2016) with a linear warm-up (see the sketch at the end of this subsection). We resize images to ${224} \times {224}$ . We adopt random resized crop and random horizontal flipping as augmentations and do not use any regularization (e.g., weight decay, dropout, or the stochastic
319
+
320
+ Table A3. Transfer evaluation details.
321
+
322
+ <table><tr><td>Dataset</td><td>Learning rate</td><td>Total epochs and warm-up epochs</td><td>layer-wise lr decay</td></tr><tr><td>Flowers</td><td>{0.01, 0.03, 0.1}</td><td>{(150,30),(250,50)}</td><td>{1.0, 0.75}</td></tr><tr><td>Pets</td><td>{0.01, 0.03, 0.1}</td><td>{(70,14),(150,30)}</td><td>{1.0, 0.75}</td></tr><tr><td>Aircraft</td><td>{0.01, 0.03, 0.1}</td><td>{(50,10),(100,20)}</td><td>{1.0, 0.75}</td></tr><tr><td>Cars</td><td>{0.01, 0.03, 0.1}</td><td>{(50,10),(100,20)}</td><td>{1.0, 0.75}</td></tr><tr><td>CIFAR100</td><td>{0.03, 0.1, 0.3}</td><td>{(25, 5),(50,10)}</td><td>{1.0, 0.75}</td></tr></table>
323
+
324
+ depth regularization technique (Huang et al., 2016)). For iNat18, we follow the same training configurations as those on ImageNet.
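+ 
+ Below is a minimal sketch of the cosine decay schedule with linear warm-up used throughout the transfer experiments; the epoch-level granularity is an assumption for illustration.
+ 
+ ```python
+ import math
+ 
+ def lr_at_epoch(epoch, base_lr, total_epochs, warmup_epochs):
+     """Cosine decay with a linear warm-up (Loshchilov & Hutter, 2016)."""
+     if epoch < warmup_epochs:
+         return base_lr * epoch / warmup_epochs  # linear warm-up
+     progress = (epoch - warmup_epochs) / (total_epochs - warmup_epochs)
+     return 0.5 * base_lr * (1.0 + math.cos(math.pi * progress))
+ ```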
325
+
326
+ # A.5. Transfer Evaluation Details on Dense Prediction Tasks
327
+
328
+ We reproduce the setup in (Li et al., 2021), except for replacing the backbone with ViT-Tiny and decreasing the input image size from 1024 to 640 to make it trainable on a single machine with 8 NVIDIA V100. We fine-tune for up to 100 epochs on COCO (Lin et al., 2014), with different pre-trained models as initialization of the backbone. We do not use layer-wise $lr$ decay since we find it useless for the tiny backbone on the detection tasks. The weight decay is 0.05 and the stochastic depth regularization (Huang et al., 2016) is not used.
329
+
330
+ # A.6. Analysis Methods
331
+
332
+ We adopt the Centered Kernel Alignment (CKA) metric to analyze the representation similarity $(S_{rep})$ within and across networks. Specifically, CKA takes two feature maps (or representations) $X$ and $Y$ as input and computes their normalized similarity in terms of the Hilbert-Schmidt Independence Criterion (HSIC) as
333
+
334
+ $$
335
+ S_{rep}(\boldsymbol{X}, \boldsymbol{Y}) = \mathrm{CKA}(\boldsymbol{K}, \boldsymbol{L}) = \frac{\mathrm{HSIC}(\boldsymbol{K}, \boldsymbol{L})}{\sqrt{\mathrm{HSIC}(\boldsymbol{K}, \boldsymbol{K})\,\mathrm{HSIC}(\boldsymbol{L}, \boldsymbol{L})}}, \tag{A1}
336
+ $$
337
+
338
+ where $\pmb{K} = \pmb{X}\pmb{X}^{\mathrm{T}}$ and $\pmb{L} = \pmb{Y}\pmb{Y}^{\mathrm{T}}$ denote the Gram matrices for the two feature maps. A minibatch version is adopted by using an unbiased estimator of HSIC (Nguyen et al., 2020) to work at scale with our networks. We select the normalized version of the output representation of each Transformer block (consisting of a multi-head self-attention (MHA) block and an MLP block). Specifically, we select the feature map after the first LayerNorm (LN) (Ba et al., 2016) of the next block as the representation of this Transformer block as depicted in Fig. A1.
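+ 
+ For illustration, a minimal NumPy sketch of CKA with a linear kernel and the simple biased HSIC estimator is given below; note that our analyses use the minibatch variant with the unbiased estimator (Nguyen et al., 2020) instead.
+ 
+ ```python
+ import numpy as np
+ 
+ def linear_cka(x, y):
+     """CKA between two feature maps x: (n, d1) and y: (n, d2)."""
+     n = x.shape[0]
+     h = np.eye(n) - np.ones((n, n)) / n  # centering matrix
+     k = h @ (x @ x.T) @ h                # centered Gram matrix K
+     l = h @ (y @ y.T) @ h                # centered Gram matrix L
+     hsic_kl = (k * l).sum()              # biased HSIC estimate (up to scaling)
+     return hsic_kl / np.sqrt((k * k).sum() * (l * l).sum())
+ ```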
339
+
340
+ ![](images/e2dde7ddd385ff1f49b12bbefda66d5d513a77f254b707058ee4f61887cff35f.jpg)
341
+ Figure A1. Transformer block.
342
+
343
+ # B. More Analyses on the Pre-Training
344
+
345
+ # B.1. Analyses with More Models as Reference
346
+
347
+ In Sec. 4, the analyses are mainly conducted by adopting the supervisedly trained DeiT-Tiny as the reference model. Here, we additionally introduce stronger recognition models as references to demonstrate the generalizability of our analyses. Specifically, we use ViT-Base models trained with various recipes as references: DeiT-Base (supervisedly trained on IN1K following Touvron et al. (2021a), achieving $82.0\%$ top-1 accuracy on ImageNet), ViT-Base-21k (supervisedly trained on IN21K following Steiner et al. (2021)), and ViT-Base-21k-1k (first pre-trained on IN21K and then fine-tuned on IN1K following Steiner et al. (2021), achieving $84.5\%$ top-1 accuracy on ImageNet). The layer representation similarity is presented in Fig. A2.
348
+
349
+ First, we observe that our default reference model, DeiT-Tiny, aligns well with these larger models (as shown in the left column of Fig. A2). We conjecture that supervisedly trained ViTs generally have similar layer representation structures. Based on these stronger reference models, we observe phenomena for MAE-Tiny and MoCov3-Tiny similar to those discussed in Sec. 4, which demonstrates the robustness of our analyses and conclusions w.r.t. different reference models.
350
+
351
+ Then, we analyze the larger MAE-Base with these newly introduced models as references, as shown in the last column of Fig. A2. We observe that MAE-Base still aligns relatively well with these much stronger recognition models, which supports
352
+
353
+ ![](images/6379b0a4989fabef567f7cc52c81823a97c839cc97931414c2de2bad9c6740ec.jpg)
354
+
355
+ ![](images/59c8b9ef6d99f2eac72289c1d4f61f8fa35887434d32fb61d96d196e4cf6440c.jpg)
356
+
357
+ ![](images/babef7d4ca8ab2c77562d4a75eb92c15ffabbdd2885ad1053adc4ecf793dbbf8.jpg)
358
+
359
+ ![](images/3136583be829c48b0c8779d262a088d9b8b453f795fc8d175bc4d0ae6dc6db62.jpg)
360
+
361
+ ![](images/5f555b7bfdab2f43bb27a26eea982b24e5e2fc4509e3455ec0e6e1834b34e218.jpg)
362
+
363
+ ![](images/b8cf178b458c7145918694cc455305687732826a8f357b8dcf780a18272efc13.jpg)
364
+
365
+ ![](images/a56a562af898f8f6734e6df15c18d61a730ecb169d50f26dc99ab9ee5aa02fb8.jpg)
366
+
367
+ ![](images/baaf297bb80eab5792f46f0df0ad162c91af5f46fba78436281aad80cea98d26.jpg)
368
+
369
+ ![](images/871dd51efef4dc3c677cd9982eec5faf8ea95c89afe251ae88389e39fe7026ff.jpg)
370
+ Figure A2. Layer representation analyses with DeiT-Base (supervisedly trained on IN1K, the top row), ViT-Base-21k (supervisedly trained on IN21K, the middle row), and ViT-Base-21k-1k (supervisedly pre-trained on IN21K and fine-tuned on IN1K, the bottom row) as the reference models.
371
+
372
+ ![](images/6b5582754b636ee1ced2d6f0edb72e8c4102e199ea8853ef7a29c99469ce22fe.jpg)
373
+
374
+ ![](images/c1fe760d8b2531260a620b4f2d1d0babf7a8a4f92976a35c23a23564fa52460d.jpg)
375
+
376
+ ![](images/979fbafc50c448f499b58823dd3bdd16a853daaa67ecc9ff23efdf9635656bf3.jpg)
377
+
378
+ our claim in Sec. 5 that it is possible to extract features relevant to recognition in higher layers for the scaled-up encoder in MAE pre-training. This is a prerequisite for the proposed distillation to improve the pre-trained models.
379
+
380
+ # B.2. Analyses Based on Linear Probing Evaluation
381
+
382
+ Our analyses are mainly based on the fine-tuning evaluation. In this section, we present some experimental results based on linear probing evaluation, in which only a classifier is tuned during the downstream training while the pre-trained representations are kept frozen. It reflects how linearly separable the representations of the pre-trained models are w.r.t. semantic categories.
383
+
384
+ As shown in Tab. A4, the linear probing performance is consistently lower than the fine-tuning performance. Coupled with the fact that linear probing does not save much training time when evaluating lightweight models, it is not an effective way to utilize the pre-trained models compared to the fine-tuning setting.
385
+
386
+ Furthermore, the linear probing results do not reflect the fine-tuned performance according to Tab. A4 and Tab. 4, especially on downstream tasks with relatively sufficient labeled data (e.g., iNat18 and ImageNet), and thus may lead to an underestimation of the practical utility of some pre-trained models on downstream tasks. We attribute this to the fact that linear probing only evaluates the final representation of a pre-trained model, overlooking the value of providing good initialization for the lower layers, at which MAE-Tiny is better than MoCov3-Tiny.
387
+
388
+ Additionally, the inferior linear probing results of MAE-Tiny compared to MoCov3-Tiny also support our analyses in Sec. 4.1 that MoCov3-Tiny learns more semantics at an abstract level relevant to recognition in higher layers than MAE-Tiny. Our proposed distillation technique can nevertheless improve the results to a certain extent.
389
+
390
+ Table A4. Linear probing evaluation of pre-trained models on downstream classification tasks. Top-1 accuracy is reported.
391
+
392
+ <table><tr><td>Datasets Init.</td><td>Flowers</td><td>Pets</td><td>Aircraft</td><td>Cars</td><td>CIFAR100</td><td>iNat18</td><td>ImageNet</td></tr><tr><td>supervised DeiT-Tiny</td><td>91.0</td><td>92.0</td><td>41.2</td><td>47.9</td><td>73.6</td><td>39.8</td><td>-</td></tr><tr><td>self-supervised MoCov3-Tiny</td><td>93.2</td><td>83.5</td><td>44.8</td><td>44.5</td><td>73.4</td><td>36.2</td><td>62.1</td></tr><tr><td>MAE-Tiny</td><td>48.9</td><td>25.0</td><td>12.8</td><td>8.8</td><td>31.0</td><td>1.4</td><td>23.3</td></tr><tr><td>D-MAE-Tiny</td><td>77.1</td><td>55.5</td><td>20.1</td><td>16.4</td><td>58.4</td><td>10.7</td><td>42.0</td></tr></table>
393
+
394
+ Table A5. Comparisons on more pre-training methods. It is a revised version of Tab. 1 in the main paper with more self-supervised pre-training methods.
395
+
396
+ <table><tr><td rowspan="2">Methods</td><td rowspan="2">Pre-training Data</td><td rowspan="2">Epochs</td><td rowspan="2">Time (hour)</td><td colspan="2">Fine-tuning</td></tr><tr><td>recipe</td><td>Top-1 Acc. (%)</td></tr><tr><td>from scratch</td><td>-</td><td>-</td><td>-</td><td>ori.</td><td>74.5</td></tr><tr><td>from scratch</td><td>-</td><td>-</td><td>-</td><td>impr.</td><td>75.8</td></tr><tr><td>Supervised (Steiner et al., 2021)</td><td>IN21K w/ labels</td><td>30</td><td>20</td><td>impr.</td><td>76.9</td></tr><tr><td>Supervised (Steiner et al., 2021)</td><td>IN21K w/ labels</td><td>300</td><td>200</td><td>impr.</td><td>77.8</td></tr><tr><td>MoCo-v3 (Chen et al., 2021a)</td><td>IN1K w/o labels</td><td>400</td><td>52</td><td>impr.</td><td>76.8</td></tr><tr><td>MAE (He et al., 2021)</td><td>IN1K w/o labels</td><td>400</td><td>23</td><td>impr.</td><td>78.0</td></tr><tr><td>DINO (Caron et al., 2021)</td><td>IN1K w/o labels</td><td>400</td><td>83</td><td>impr.</td><td>77.2</td></tr><tr><td>SimMIM (Xie et al., 2022)</td><td>IN1K w/o labels</td><td>400</td><td>40</td><td>impr.</td><td>77.9</td></tr><tr><td>D-MAE-Tiny (ours)</td><td>IN1K w/o labels</td><td>400</td><td>26</td><td>impr.</td><td>78.4</td></tr></table>
397
+
398
+ Table A6. Transfer evaluation on classification tasks and dense-prediction tasks for more pre-training methods. It is a revised version of Tab. 4 in the main paper with more self-supervised pre-training methods.
399
+
400
+ <table><tr><td>Datasets Init.</td><td>Flowers (2k/6k/102)</td><td>Pets (4k/4k/37)</td><td>Aircraft (7k/3k/100)</td><td>Cars (8k/8k/196)</td><td>CIFAR100 (50k/10k/100)</td><td>iNat18 (438k/24k/8142)</td><td colspan="2">COCO(det.) (118k/50k/80)</td></tr><tr><td>supervised DeiT-Tiny</td><td>96.4</td><td>93.1</td><td>73.5</td><td>85.6</td><td>85.8</td><td>63.6</td><td>40.4</td><td>35.5</td></tr><tr><td>self-supervised MoCov3-Tiny</td><td>94.8</td><td>87.8</td><td>73.7</td><td>83.9</td><td>83.9</td><td>54.5</td><td>39.7</td><td>35.1</td></tr><tr><td>MAE-Tiny</td><td>85.8</td><td>76.5</td><td>64.6</td><td>78.8</td><td>78.9</td><td>60.6</td><td>39.9</td><td>35.4</td></tr><tr><td>DINO-Tiny</td><td>95.6</td><td>89.3</td><td>73.6</td><td>84.5</td><td>84.7</td><td>58.7</td><td>41.4</td><td>36.7</td></tr><tr><td>SimMIM-Tiny</td><td>77.2</td><td>68.9</td><td>55.9</td><td>70.4</td><td>77.7</td><td>60.8</td><td>39.3</td><td>34.8</td></tr><tr><td>D-MAE-Tiny (ours)</td><td>95.2</td><td>89.1</td><td>79.2</td><td>87.5</td><td>85.0</td><td>63.6</td><td>42.3</td><td>37.4</td></tr></table>
401
+
402
+ # B.3. Analyses for More Self-Supervised Pre-Training Methods
403
+
404
+ In the main paper, our analyses mainly focus on MAE (He et al., 2021) and MoCov3 (Chen et al., 2021a). In this section, more self-supervised pre-training methods are involved. Specifically, another MIM-based method, SimMIM (Xie et al., 2022), and another CL-based method, DINO (Caron et al., 2021), are evaluated based on the lightweight ViT-Tiny. The 400-epoch pre-trained models are denoted as SimMIM-Tiny and DINO-Tiny respectively.
405
+
406
+ We first evaluate their downstream performance on ImageNet, on other classification tasks, and on object detection and segmentation tasks, as shown in Tab. A5 and Tab. A6, which are also revised versions of Tab. 1 and Tab. 4 in the main paper. According to the results, we find that MIM-based methods are generally superior to CL-based methods on data-sufficient tasks (e.g., ImageNet and iNat18) while inferior on data-insufficient ones. The downstream data scale matters for all these methods, and none of them achieves consistent superiority on all downstream tasks.
407
+
408
+ Then we explore the layer representations of these models by CKA-based similarity analyses, as shown in Fig. A3. We observe similar layer representation structures within both the MIM and CL families. For instance, SimMIM-Tiny also learns poor semantics in higher layers.
409
+
410
+ Finally, we carry out the attention analyses for these models, as shown in Fig. A4. We again observe consistent properties within the MIM and CL families. Like MAE-Tiny, SimMIM-Tiny tends to focus on local patterns with concentrated attention in higher layers, while DINO-Tiny behaves like MoCov3-Tiny, with broad and global attention in higher layers.
411
+
412
+ ![](images/213f62d6b8e70d116daf940ea258964dc51a844bd637fecc27147213d6916b28.jpg)
413
+ Figure A3. Layer representation analyses for more self-supervised pre-trained models.
414
+
415
+ ![](images/1cf31734429d765940a6319e225908d0089cfef9b921b32f14ec70dcef03a565.jpg)
416
+
417
+ ![](images/1dd39baefb1ffe06507e2f82b0a1b6f6cac18d159fb533b8f25c3333f351cab9.jpg)
418
+
419
+ ![](images/af6775acd9215c11e940e213fb0799b5a6505a2c03fe6bb14752307dcfbd49c0.jpg)
420
+
421
+ ![](images/f5e77c4c1d7ff3939a7fe392004fd32e8bd0ca1e9f1557b4ce8896b7fdc9f16f.jpg)
422
+
423
+ ![](images/c39e083a51f7950486678d0f4141845e40b263fc6b175f70be82b7cb7eaaec7a.jpg)
424
+
425
+ ![](images/ce5bf4219127d33c84d97b7ae025a8dfdca1eab90a11a1fe3fd8198bd11ede2f.jpg)
426
+
427
+ ![](images/fa53a63b760450c66ab2182d5fbee5cc8b4775ec3a92afedf2b900505f26ab42.jpg)
428
+
429
+ ![](images/c95db76a8e4f3178da94eb19140bf3c55a5bcfee7e99024fd030bec89918fcb7.jpg)
430
+ Figure A4. Attention analyses for more self-supervised pre-trained models.
431
+
432
+ ![](images/9bd1c725a1b90c8a173871b7e668d0aeceb491a97de1ea5a1c0798dd19212624.jpg)
433
+
434
+ ![](images/0293687d67b3aeaf630f516e3974a5b1dafe2f55bf7a872ec298f3ae0b2967ac.jpg)
435
+
436
+ ![](images/e6843f042ea60b36e14363348f4f3dc4d6ac353512a3a5c26fbc7deb546081f3.jpg)
437
+
438
+ # C. More Analyses on Distillation
439
+
440
+ # C.1. Illustration of the Distillation Process
441
+
442
+ We illustrate our distillation process in Fig. A5 for better presentation and explanation.
443
+
444
+ Based on the masked autoencoder, we introduce a teacher ViT, which is pre-trained with MAE. During pre-training, the teacher processes the same visible image patches as the student encoder, and the attention-based distillation loss is calculated between the attention maps of the corresponding teacher's and student's layers. The parameters of the student are updated based on the joint backward gradients from the distillation loss and the original MAE reconstruction loss, while the teacher's parameters remain frozen throughout the pre-training process.
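+ 
+ A minimal sketch of what the attention-map distillation term could look like is given below. The KL divergence between the teacher's and student's attention distributions is assumed here as one common choice, and matching head/token shapes between teacher and student are likewise assumptions for illustration.
+ 
+ ```python
+ import torch
+ import torch.nn.functional as F
+ 
+ def attn_distill_loss(student_attn, teacher_attn):
+     """Both inputs: softmax-normalized attention maps of shape
+     (batch, heads, tokens, tokens) over the same visible patches."""
+     log_s = torch.log(student_attn.clamp_min(1e-8))
+     return F.kl_div(log_s, teacher_attn, reduction="batchmean")
+ 
+ # joint objective (lambda_distill is a hypothetical weighting):
+ # loss = reconstruction_loss + lambda_distill * attn_distill_loss(s_attn, t_attn)
+ ```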
445
+
446
+ # C.2. Attention Map Analyses for the Distilled Pre-trained Models
447
+
448
+ We analyze the attention distance and entropy of the distilled MAE-Tiny introduced in Sec. 5 (D-MAE-Tiny), for which distillation is applied only on the attention map of the last layer during MAE pre-training. As shown in Fig. A6, we observe more global and broad attention in the higher layers of D-MAE-Tiny compared with MAE-Tiny, which behaves more like its teacher, MAE-Base. We attribute this to the distillation on the final layer (i.e., the 12th layer) forcing the distilled layer of the student to imitate the teacher's attention, which also requires the several preceding layers to change accordingly. We reckon that this may be useful for capturing semantic features and improving downstream performance.
449
+
450
+ We also find that the attention distance of the last layer shows more diversity: some attention heads are rather global while the others are local, and all of them are concentrated. We reckon that this odd behavior arises because the layer cannot handle both training targets, the reconstruction task and the distillation, given the restricted model size. Nevertheless, the richer supervision indeed improves the quality of the preceding layers and thus achieves better downstream performance.
451
+
452
+ ![](images/19b254a9d8c47e1dd423284666a3a1720d0b630bbaa3759a6392d6369ba64180.jpg)
453
+ Figure A5. Illustration of the distillation process.
454
+
455
+ ![](images/2543d57495834b7739ea6de866720ab066aade4ce00af1b01639627b770fd487.jpg)
456
+
457
+ ![](images/d1885e3b9747657d2d7b4e49be15c1239e14e5e163eab130d623aa8dc7ad8c20.jpg)
458
+
459
+ ![](images/cbe912f56761215fd0b3e9cb9f9e75a2cb7904bfa8a9e7aac517b07480ddd841.jpg)
460
+
461
+ ![](images/5ce65f4470f418ea3d9117d3512e6b810115929523bd0c6905ad097ab53af917.jpg)
462
+ Figure A6. Attention distance and entropy analyses for the distilled MAE-Tiny.
463
+
464
+ ![](images/ca137ae83ecd974cf38d9f9ad876064c0d16f53ad7d05513ebd21e26309af506.jpg)
465
+
466
+ ![](images/a36078f036e6e729a46743b9713302fe4731c14b448720f801d0866f5bf1bdad.jpg)
467
+
468
+ # C.3. Applying Distillation on More Networks
469
+
470
+ To further evaluate our proposed distillation method, we additionally apply it to the pre-training of ViT-Small, again with MAE-Base as the teacher. The configurations of these models are presented in Tab. A7, and the transfer evaluation results in Tab. A8. The transfer performance of the distilled MAE-Small (D-MAE-Small) surpasses that of the baseline model, MAE-Small, by a large margin, which shows the efficacy of the distillation.
471
+
472
+ # C.4. Distilling with Larger Teachers
473
+
474
+ We further conduct additional experiments with various models as teachers and compare their performance on various downstream tasks (see Tab. A9). The configurations of the student model (ViT-Tiny) and the teacher models are presented in Tab. A7. The results indicate that an appropriately sized teacher model provides the largest gains in distillation, which is a common finding in the area of knowledge distillation (Cho & Hariharan, 2019; Jin et al., 2019; Mirzadeh et al., 2020). To further investigate the impact of teacher size, we conduct CKA-based layer representation analyses of these teachers, as shown in Fig. A7. It can be seen that a teacher that is too small (MAE-Small) also suffers from degraded representations in higher layers and cannot provide sufficient knowledge, while a teacher that is too large (MAE-Large) results in a capacity mismatch with the tiny student model: it has over 50 times more parameters than ViT-Tiny, with a different depth and number of attention heads, and thus learns patterns somewhat distinct from those of the reference tiny model, which may not be suitable for the student.
475
+
476
+ Table A7. Configurations of ViTs.
477
+
478
+ <table><tr><td>Model</td><td>channel dimension</td><td>#heads</td><td>#layers</td><td>#params</td></tr><tr><td>ViT-Tiny</td><td>192</td><td>12</td><td>12</td><td>6M</td></tr><tr><td>ViT-Small</td><td>384</td><td>12‡</td><td>12</td><td>22M</td></tr><tr><td>ViT-Base</td><td>768</td><td>12</td><td>12</td><td>86M</td></tr><tr><td>ViT-Large</td><td>1024</td><td>16</td><td>24</td><td>304M</td></tr></table>
479
+
480
+ ‡ Our ViT-Small uses 12 heads, following Chen et al. (2021a).
481
+
482
+ Table A8. Distillation on MAE-Small. Top-1 accuracy for the transfer performance on downstream classification tasks of pre-trained models w. or w/o. distillation is reported.
483
+
484
+ <table><tr><td>Datasets Init.</td><td>Flowers</td><td>Pets</td><td>Aircraft</td><td>Cars</td><td>CIFAR100</td><td>iNat18</td><td>ImageNet</td></tr><tr><td>supervised DeiT-Small</td><td>97.4</td><td>94.2</td><td>77.6</td><td>88.2</td><td>89.2</td><td>66.5</td><td>80.2</td></tr><tr><td>self-supervised MAE-Small</td><td>91.2</td><td>82.0</td><td>65.8</td><td>79.2</td><td>80.8</td><td>63.2</td><td>82.1</td></tr><tr><td>D-MAE-Small</td><td>95.8 (+4.6)</td><td>91.4 (+9.4)</td><td>80.7 (+14.9)</td><td>88.3 (+9.1)</td><td>87.8 (+7.0)</td><td>66.9 (+3.7)</td><td>82.5 (+0.4)</td></tr></table>
485
+
486
+ ![](images/4717288b9b14bdb122dcc7686d5934aca6a8699c1f8a1152f835ff08ac24069b.jpg)
487
+ Figure A7. Layer representation analyses of the teachers for distillation.
488
+
489
+ ![](images/07deb676f7a3043a5e8772cd0ee16325af16688cc37b4ac910e2351dbc107fb0.jpg)
490
+
491
+ ![](images/431db19a4ef5abc9dc6e832f3cbef0402b23221e561fe57d91003af42d340a99.jpg)
492
+
493
+ Table A9. Distillation with different sized teachers. Top-1 accuracy for the transfer performance on downstream classification tasks of the distilled pre-trained models is reported.
494
+
495
+ <table><tr><td colspan="2">Pre-training</td><td rowspan="2">Flowers</td><td rowspan="2">Pets</td><td rowspan="2">Aircraft</td><td colspan="4">Fine-tuning</td></tr><tr><td>Student</td><td>Teacher</td><td>Cars</td><td>CIFAR100</td><td>iNat18</td><td>ImageNet</td></tr><tr><td>MAE-Tiny</td><td>-</td><td>85.8</td><td>76.5</td><td>64.6</td><td>78.8</td><td>78.9</td><td>60.6</td><td>78.0</td></tr><tr><td>MAE-Tiny</td><td>MAE-Small</td><td>89.4</td><td>78.6</td><td>65.2</td><td>78.9</td><td>79.6</td><td>61.5</td><td>78.1</td></tr><tr><td>MAE-Tiny</td><td>MAE-Base</td><td>95.2</td><td>89.1</td><td>79.2</td><td>87.5</td><td>85.0</td><td>63.6</td><td>78.4</td></tr><tr><td>MAE-Tiny</td><td>MAE-Large</td><td>94.0</td><td>87.3</td><td>77.1</td><td>85.2</td><td>84.2</td><td>63.1</td><td>78.3</td></tr></table>
acloserlookatselfsupervisedlightweightvisiontransformers/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:129197763b99288f50b30ddcc92499f274eb9148e509ca29080f73b96c572c46
3
+ size 1413977
acloserlookatselfsupervisedlightweightvisiontransformers/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50c32f57e29c3528fcca3dbd60ecc4d1aecf9134543d65aa32d6dd4d0fdfc856
3
+ size 563352
acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe4ffa834a05d14baa40c86410e14a5c6822eb8a36603f58963cd89c35d6b34a
3
+ size 142017
acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4710ace612c8554427c57c6afa5f380bad99b583699c48f09eb61284046b1455
3
+ size 182077
acloserlookattheinterventionprocedureofconceptbottleneckmodels/c96d4482-d4bb-492b-abd1-1d83d3c04a21_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d782b5e5ccd02dff32043059bec7dafc4220ae4e3e361daa5c911b1ead8fc432
3
+ size 2862932
acloserlookattheinterventionprocedureofconceptbottleneckmodels/full.md ADDED
@@ -0,0 +1,730 @@
1
+ # A Closer Look at the Intervention Procedure of Concept Bottleneck Models
2
+
3
+ Sungbin Shin Yohan Jo Sungsoo Ahn Namhoon Lee
4
+
5
+ # Abstract
6
+
7
+ Concept bottleneck models (CBMs) are a class of interpretable neural network models that predict the target response of a given input based on its high-level concepts. Unlike the standard end-to-end models, CBMs enable domain experts to intervene on the predicted concepts and rectify any mistakes at test time, so that more accurate task predictions can be made at the end. While such intervenability provides a powerful avenue of control, many aspects of the intervention procedure remain rather unexplored. In this work, we develop various ways of selecting intervening concepts to improve the intervention effectiveness and conduct an array of in-depth analyses as to how they evolve under different circumstances. Specifically, we find that an informed intervention strategy can reduce the task error more than ten times compared to the current baseline under the same amount of intervention counts in realistic settings, and yet, this can vary quite significantly when taking into account different intervention granularity. We verify our findings through comprehensive evaluations, not only on the standard real datasets, but also on synthetic datasets that we generate based on a set of different causal graphs. We further discover some major pitfalls of the current practices which, without a proper addressing, raise concerns on reliability and fairness of the intervention procedure.
8
+
9
+ # 1. Introduction
10
+
11
+ While deep learning has made rapid strides in recent years (LeCun et al., 2015; Jordan & Mitchell, 2015), the standard neural network models are not quite explainable, in that their decision-making process is neither straightforward to account for nor easy to control. To tackle this issue, various
12
+
13
+ ![](images/d459e784904bcfb2a6be5797aca454844a3570c72ce6c804870f09d3d89a8bc7.jpg)
14
+ (a) Diagram of CBMs
15
+
16
+ ![](images/6c7333a8275b46aa3aa6f1e7dd0fbdadccb33fa17ec46fc08b387606db9c1d37.jpg)
17
+ (b) Task vs. Concept errors
18
+ Figure 1: (a) Given input data, a CBM first predicts its concepts $(g : x \to c)$ and then, based on these, makes a subsequent prediction for the target response $(f : c \to y)$ . (b) Average task error (misclassification rate) vs. the number of incorrectly predicted concepts (on the CUB dataset). The task error increases rapidly as more mistakes are made in concept prediction; e.g., making a single mistake yields a $25\%$ increase in task error.
19
+
20
+ interpretable models have been proposed including, for example, those using concept activation vectors (Kim et al., 2018; Ghorbani et al., 2019), relating pixel contributions to image classification (Zhou et al., 2016; Selvaraju et al., 2017), or building intrinsically interpretable architectures (Alvarez Melis & Jaakkola, 2018).
21
+
22
+ Concept bottleneck models (CBMs) are among these efforts to empower interpretability (Koh et al., 2020; Bahadori & Heckerman, 2021; Margeloiu et al., 2021; Mahinpei et al., 2021; Sawada & Nakamura, 2022; Zarlenga et al., 2022). Unlike standard end-to-end models, CBMs work in two steps: they first predict human-interpretable properties of a given input called concepts, based on which they subsequently make the final prediction for the given task. For instance, CBMs may classify the species of a bird based on its wing pattern or leg color rather than straight from the raw pixel values (see Figure 1a).
23
+
24
+ Revisited recently by Koh et al. (2020), this classic idea further facilitates human-model interaction in addition to plain interpretability, in that it allows one to intervene on the predicted concepts at test time, such that the subsequent prediction is made based on the rectified concept values. Notably, such intervention must be treated attentively as we find that correcting only a small number of mistakes on mis-predicted concepts can lead to a significant increase in
25
+
26
+ <table><tr><td>Work</td><td>Selection</td><td>Cost</td><td>Level</td><td>Imp.</td><td>Data</td><td>Rel.</td></tr><tr><td>Koh et al. (2020)</td><td>X</td><td>X</td><td>△</td><td>△</td><td>X</td><td>X</td></tr><tr><td>Chauhan et al. (2022)</td><td>✓</td><td>△</td><td>△</td><td>△</td><td>X</td><td>X</td></tr><tr><td>Sheth et al. (2022)</td><td>✓</td><td>X</td><td>△</td><td>X</td><td>X</td><td>X</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
27
+
28
+ Table 1: Comparison between the studies on intervention strategy of CBMs. $\triangle$ represents that the corresponding work provides only partial evaluations. Selection and Cost represent concept selection criteria and their analysis in terms of theoretical cost as will be discussed in Section 4.2. We study the effects of Level, Implementation and Data on intervention effectiveness in Sections 4.3, 4.4 and 5. Reliability of intervention practice is discussed in Section 6.
29
+
30
+ the task performance (see Figure 1b). Considering the high cost of intervention, i.e., having domain experts go over each concept requires tremendous effort, this result further indicates the necessity of efficient intervention procedures to ensure the utility of CBMs.
31
+
32
+ Despite the great potential, the intervention procedure of CBMs has not been studied much in the literature, quite surprisingly. For example, previous works tend to focus on increasing task performance (Sawada & Nakamura, 2022; Zarlenga et al., 2022) and addressing the problem of confounding factors (Bahadori & Heckerman, 2021) or information leakage (Margeloiu et al., 2021; Mahinpei et al., 2021; Havasi et al., 2022; Marconato et al., 2022). While a few concurrent works suggest new intervention methods (Chauhan et al., 2022; Sheth et al., 2022), we find that many critical aspects of the intervention procedure still remain unexplored (see Table 1).
33
+
34
+ Our contributions are summarized as follows. First of all, we develop various concept selection criteria as new intervention strategies, improving the intervention performance of CBMs quite dramatically given the same number of interventions. We also provide extensive evaluations to analyze these criteria under a wide variety of experimental settings, considering the theoretical cost of each criterion, levels of intervention related to test-time environments, and how these models are trained or how the concept predictions are conceptualized. We further develop a new framework to generate synthetic data using diverse causal graphs and conduct fully controlled experiments to verify the effectiveness of intervention on varying data. These results reveal that data characteristics as well as intervention granularity can affect the intervention procedure quite significantly. Finally, we identify some pitfalls of the current intervention practices, which helps to take a step toward building trustworthy and responsible interpretable models.
35
+
36
+ # 2. Related Work
37
+
38
+ Since the seminal work of Koh et al. (2020), CBMs have evolved in many different ways. Bahadori & Heckerman (2021) develop a debiased CBM to remove the impact of confounding information to secure causality. Sawada & Nakamura (2022) augment CBMs with unsupervised concepts to improve task performance. Mahinpei et al. (2021); Margeloiu et al. (2021) suggest addressing the information leakage problem in CBMs to improve interpretability of learned concepts, while Marconato et al. (2022); Havasi et al. (2022) design new CBMs based on disentangled representations or autoregressive models. Zarlenga et al. (2022) propose to learn semantically meaningful concepts using concept embedding models to push the accuracy-interpretability trade-off. Both Chauhan et al. (2022) and Sheth et al. (2022) present uncertainty-based intervention methods to determine which concepts to intervene on. We remark that previous work is mostly focused on developing CBM variants for high task performance from model-centric perspectives, whereas our work provides in-depth analyses and comprehensive evaluations on the intervention procedure of the standard CBMs in greater granularity.
39
+
40
+ # 3. Intervention Strategies
41
+
42
+ # 3.1. Preliminary
43
+
44
+ Let $x \in \mathbb{R}^d$ , $c \in \{0,1\}^k$ , $y \in \mathcal{V}$ be input data, binary concepts, and target responses, respectively; here, $d$ and $k$ denote the dimensionality of the input data and the cardinality of the concepts, and we assume $\mathcal{V}$ encodes a categorical distribution for classification tasks. Given some input data (e.g., an image), a CBM first predicts its concepts (e.g., attributes present in the given image) using a concept predictor $g$ and subsequently the target response (e.g., the class of the image) using a target predictor $f$ : i.e., first $\hat{c} = g(x)$ , then $\hat{y} = f(\hat{c})$ , where $\hat{c}$ and $\hat{y}$ are the predictions of concepts and target response.
45
+
46
+ In this process, one can intervene on a set of concepts $S \subseteq \{1, \dots, k\}$ so that the final prediction can be made based on rectified concept values, i.e., $\hat{y} = f(\tilde{c})$ where $\tilde{c} = \{\hat{c}_{\backslash S}, c_{S}\}$ denotes the updated concept values partly rectified on $S$ with $\hat{c}_{\backslash S}$ referring to the predicted concept values excluding $S$ .
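+ 
+ A minimal sketch of this intervention operation is given below; the function names are hypothetical and only illustrate the notation above.
+ 
+ ```python
+ import numpy as np
+ 
+ def intervene(c_hat, c_true, S):
+     """Replace predicted concepts by ground truth on the intervened
+     index set S, i.e., c_tilde = {c_hat_\S, c_S}."""
+     c_tilde = c_hat.copy()
+     idx = list(S)
+     c_tilde[idx] = c_true[idx]
+     return c_tilde
+ 
+ # final prediction after intervention: y_hat = f(intervene(g(x), c_true, S))
+ ```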
47
+
48
+ # 3.2. Concept Selection Criteria
49
+
50
+ How should one select which concepts to intervene on? This is a fundamental question to be answered in order to legitimize CBMs in practice, since intervention incurs the cost of employing experts, which would increase with the number of intervened concepts $|S|$ . In principle, one would select the concepts that lead to the largest increase in task performance. To address this question and investigate the effectiveness of the intervention procedure in current practice, we develop various concept selection
51
+
52
+ <table><tr><td>Criteria</td><td>Ng</td><td>Nf</td><td>Cost in complexity</td></tr><tr><td>RAND</td><td>1</td><td>1</td><td>O(τg + τf + nτi)</td></tr><tr><td>UCP</td><td>1</td><td>1</td><td>O(τg + τf + nτi)</td></tr><tr><td>LCP</td><td>1</td><td>1</td><td>O(τg + τf + nτi)</td></tr><tr><td>CCTP</td><td>1</td><td>3</td><td>O(τg + 3τf + nτi)</td></tr><tr><td>ECTP</td><td>1</td><td>2k + 2</td><td>O(τg + (2k + 2)τf + nτi)</td></tr><tr><td>EUDTP</td><td>1</td><td>2k + 2</td><td>O(τg + (2k + 2)τf + nτi)</td></tr></table>
53
+
54
+ Table 2: Theoretical cost of employing each concept selection criterion up to making the final prediction with $n$ intervened concepts. ${N}_{g}$ and ${N}_{f}$ refer to the number of forward/backward passes through $g$ and $f$ , respectively.
55
+
56
+ criteria, each of which defines a selection score $s_i$ for the $i$ -th concept. Concepts are then intervened on in decreasing order of these scores.
57
+
58
+ Random (RAND) It selects concepts uniformly at random as in Koh et al. (2020). We can treat this method as assigning a random score for each concept, i.e., $s_i \sim \mathcal{U}_{[0,1]}$ . It will serve as a baseline to study the effectiveness of concept selection criteria.
59
+
60
+ Uncertainty of concept prediction (UCP) It selects concepts with the highest uncertainty of concept prediction. Specifically, it defines $s_i = \mathcal{H}(\hat{c}_i)$ where $\mathcal{H}$ is the entropy function. When the concepts are binary, it follows that $s_i = 1 / |\hat{c}_i - 0.5|$ as in Lewis & Catlett (1994); Lewis (1995). Intuitively, uncertain concepts may have an adverse influence on making the correct target prediction, and thus, they are fixed first by this criterion.
61
+
62
+ Loss on concept prediction (LCP) It selects concepts with the largest loss on concept prediction compared to the ground-truth. Specifically, it defines $s_i = |\hat{c}_i - c_i|$ . This scheme can be advantageous to increasing task performance since a low concept prediction error is likely to lead to a correct target prediction. Nonetheless, this score is unavailable in practice as the ground-truth is unknown at test time.
63
+
64
+ Contribution of concept on target prediction (CCTP) It selects concepts with the highest contribution on target prediction. Specifically, it sums up the contribution as $s_i = \sum_{j=1}^{M} \left| \hat{c}_i \frac{\partial f_j}{\partial \hat{c}_i} \right|$ where $f_j$ is the output related to $j$ -th target class and $M$ is the number of classes. This scheme is inspired by methods to explain neural network predictions (Selvaraju et al., 2017).
65
+
66
+ Expected change in target prediction (ECTP) It selects concepts with the highest expected change in the target predictive distribution with respect to intervention. Specifically, it defines $s_i = (1 - \hat{c}_i)D_{\mathrm{KL}}(\hat{y}_{\hat{c}_i = 0}\| \hat{y}) + \hat{c}_iD_{\mathrm{KL}}(\hat{y}_{\hat{c}_i = 1}\| \hat{y})$ where $D_{\mathrm{KL}}$ refers to the Kullback-Leibler divergence, and $\hat{y}_{\hat{c}_i = 0}$ and $\hat{y}_{\hat{c}_i = 1}$ refer to the new target prediction with $\hat{c}_i$ being intervened to be 0 and 1, respectively. The intuition behind this scheme is that it would be better to intervene on
67
+
68
+ ![](images/662b1efc04221034544f6db439447bc9aa6a871aa687f34bcb5240f6ee045335.jpg)
69
+ (a) Individual vs. Group
70
+
71
+ ![](images/86de1eb84b7e70fe4065f2e5a029af4e8eea56cd6a3ed62399fc68911e279347.jpg)
72
+ (b) Single vs. Batch
73
+ Figure 2: Different levels of intervention conducted on concepts. Each number represents the order of intervention.
74
+
75
+ those concepts whose rectification leads to a large expected change in target prediction (Settles et al., 2007).
76
+
77
+ Expected uncertainty decrease in target prediction (EUDTP) It selects concepts with the largest expected entropy decrease in target predictive distribution with respect to intervention. Specifically, it defines $s_i = (1 - \hat{c}_i)\mathcal{H}(\hat{y}_{\hat{c}_i = 0}) + \hat{c}_i\mathcal{H}(\hat{y}_{\hat{c}_i = 1}) - \mathcal{H}(\hat{y})$ . Intuitively, it penalizes the concepts whose expected decrease in the target prediction entropy is low when intervened (Guo & Greiner, 2007).
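+ 
+ As an illustration of the criteria above, a minimal sketch of two of them (UCP and ECTP) for binary concepts is given below; `f` is assumed to map a concept vector to a categorical distribution over classes, and the helper names are hypothetical.
+ 
+ ```python
+ import numpy as np
+ 
+ def ucp_scores(c_hat):
+     """UCP: binary entropy of each predicted concept probability."""
+     p = np.clip(c_hat, 1e-8, 1 - 1e-8)
+     return -(p * np.log(p) + (1 - p) * np.log(1 - p))
+ 
+ def ectp_scores(c_hat, f):
+     """ECTP: expected KL change in target prediction under intervention."""
+     y_hat = np.clip(f(c_hat), 1e-8, 1.0)
+     scores = np.empty(len(c_hat))
+     for i in range(len(c_hat)):
+         s = 0.0
+         for v, w in ((0.0, 1 - c_hat[i]), (1.0, c_hat[i])):
+             c_v = c_hat.copy()
+             c_v[i] = v
+             y_v = np.clip(f(c_v), 1e-8, 1.0)
+             s += w * np.sum(y_v * np.log(y_v / y_hat))  # D_KL(y_v || y_hat)
+         scores[i] = s
+     return scores
+ 
+ # intervene in decreasing order of scores, e.g., np.argsort(-ucp_scores(c_hat))
+ ```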
78
+
79
+ # 3.2.1. Cost of Intervention
80
+
81
+ Note that the cost of intervention may differ by the choice of concept selection criterion. Specifically, let the theoretical cost of intervening on a concept be $\tau_{i}$ (e.g., the time for an expert to look at the input and fix its attribute), and the theoretical costs of making inference with $g$ and $f$ be $\tau_{g}$ and $\tau_{f}$ , respectively. Then, for example, the total cost of utilizing CCTP up to making the final prediction with $n$ intervened concepts would be $\mathcal{O}(\tau_g + 3\tau_f + n\tau_i)$ ; here we assume that the cost of a backward pass through $f$ is the same as $\tau_{f}$ . We summarize the cost of all concept selection criteria in Table 2.
82
+
83
+ # 3.3. Levels of Intervention
84
+
85
+ We find that intervention can be done at different levels given some auxiliary information about the structure of concepts or economic constraints put on practitioners. For example, it is often the case that datasets used to train CBMs have the grouping information for related concepts (Wah et al., 2011). Another situation worth consideration is where one has access to a batch of data to process with a budget constraint, and the goal is to maximize the overall task performance while minimizing the intervention effort (e.g., examining medical images in a hospital). Taking into account these scenarios, we extend the intervention procedure at various levels to study the effectiveness of concept selection criteria.
86
+
87
+ Individual vs. Group intervention Intervention can be done depending on concept association (see Figure 2a):
88
+
89
+ - Individual (I): Concepts are assumed to be independent of each other and thus selected individually one at a time.
90
+ - Group (G): A group of related concepts is selected at once, whose association information depends on the dataset. The selection score of a group is computed by averaging the selection scores of the individual concepts within the group.
91
+
92
+ Single vs. Batch intervention Intervention can be done depending on data accessibility (see Figure 2b):
93
+
94
+ - Single (S): Every test case is allocated the same intervention budget (e.g., intervention counts). This could be useful for online systems where test data come in sequentially and experts need to process as many cases as possible under a budget constraint.
95
+ - Batch (B): A batch of test cases shares a total intervention budget (see the sketch below). This scheme could be particularly useful when the concept prediction is imbalanced toward easy cases and one wants to focus on intervening on hard cases so as to maximize the overall task performance.
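+ 
+ A minimal sketch of batch-level budget allocation under these assumptions: given per-(case, concept) selection scores, the shared budget is spent on the globally highest-scoring pairs rather than equally per case.
+ 
+ ```python
+ import numpy as np
+ 
+ def batch_intervention_order(scores, budget):
+     """scores: (N, k) selection scores for N test cases and k concepts.
+     Returns the (case, concept) pairs to intervene on under a shared budget."""
+     flat = np.argsort(scores, axis=None)[::-1][:budget]
+     return [divmod(int(idx), scores.shape[1]) for idx in flat]
+ ```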
96
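+
+ The difference can be made concrete with a small sketch (illustrative code under our own naming; `scores` is assumed to hold per-example, per-concept selection scores): single-level intervention spends a fixed budget per example, while batch-level intervention spends one shared budget on the globally highest-scoring (example, concept) pairs:
+
+ ```python
+ import numpy as np
+
+ def single_allocation(scores, budget_per_example):
+     # scores: (num_examples, k); pick the top-b concepts of each example.
+     order = np.argsort(-scores, axis=1)
+     return [set(row[:budget_per_example].tolist()) for row in order]
+
+ def batch_allocation(scores, total_budget):
+     # One shared budget over the whole batch: take the globally
+     # highest-scoring (example, concept) pairs.
+     flat = np.argsort(-scores, axis=None)[:total_budget]
+     ex_idx, c_idx = np.unravel_index(flat, scores.shape)
+     chosen = [set() for _ in range(scores.shape[0])]
+     for ex, c in zip(ex_idx, c_idx):
+         chosen[ex].add(int(c))
+     return chosen  # hard examples may get many interventions, easy ones none
+ ```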
+
+ # 4. Evaluating Intervention Strategies
+
+ # 4.1. Experiment Settings
+
+ Dataset We experiment with three datasets: (1) CUB (Wah et al., 2011) – the standard dataset used to study CBMs, (2) SkinCon (Daneshjou et al., 2022b) – a medical dataset used to build interpretable models, and (3) Synthetic – the synthetic datasets we generate based on different causal graphs to conduct a wide range of controlled experiments. Extensive details of these datasets, including preprocessing, label characteristics, data splits, and the generation process, are provided in Appendix A.
+
+ Implementation We follow the standard implementation protocols as in previous works. The full details, including model architectures and optimization hyperparameters, are provided in Appendix B. Our code is available at https://github.com/ssbin4/Closer-Intervention-CBM.
+
+ Training We consider the following training strategies, similar to Koh et al. (2020):
+
+ - IND: $g$ and $f$ are trained independently of each other. $f$ always takes ground-truth concept values as input.
+ - SEQ: $g$ and $f$ are trained sequentially, $g$ first and $f$ next; $f$ takes as input the concept values predicted by the trained $g$.
+ - JNT: $g$ and $f$ are trained jointly with a multi-objective loss. This results in higher initial task accuracy but comes at the price of decreased intervention effectiveness (Koh et al., 2020).
+ - JNT+P: similar to JNT, but the input to $f$ is the sigmoid-activated probability rather than the logits.
+
+ Conceptualization We consider different forms of concept predictions as input to the target predictor at inference:
+
+ - SOFT: $f$ takes the real-valued $\hat{c} \in [0,1]^k$ as a soft representation of concepts (Koh et al., 2020).
+ - HARD: $f$ takes binary values $\hat{c} \in \{0,1\}^k$ as a hard representation of concepts based on $\mathbb{1}[\hat{c} \geq 0.5]$ (Mahinpei et al., 2021). This prevents information leakage (Havasi et al., 2022) in exchange for decreased prediction performance.
+ - SAMP: $m$ random samples are drawn by treating the soft concept prediction scores as a probability distribution, and the target prediction is made as an ensemble, i.e., $\hat{y} = \frac{1}{m}\sum_{i=1}^{m}f(\hat{c}^{(i)})$ where $\hat{c}^{(i)}$ is the $i$-th binarized concept sample (Havasi et al., 2022). We use $m = 5$ for the experiments (see the sketch below).
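+
+ A minimal sketch of SAMP under the reading above (each of the $m$ samples is an independent Bernoulli draw from the soft concept scores; this is illustrative, not the released code):
+
+ ```python
+ import numpy as np
+
+ def samp_predict(c_soft, f, m=5, seed=0):
+     """c_soft: (k,) soft concept scores in [0, 1].
+     f: maps a binary concept vector to target class probabilities."""
+     rng = np.random.default_rng(seed)
+     draws = rng.random((m, len(c_soft))) < c_soft  # Bernoulli(c_soft) samples
+     return np.mean([f(d.astype(float)) for d in draws], axis=0)  # ensemble
+ ```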
+
+ # 4.2. Evaluating Concept Selection Criteria
+
+ We first evaluate the intervention effectiveness of the concept selection criteria and present the results in Figure 3. Across all datasets, we find that the current practice of random intervention (RAND) is easily outperformed by the other alternatives in almost all cases, with a significant margin. Specifically, in the CUB experiment, correcting 20 concepts by random intervention reduces the task error by less than $4\%$, whereas correcting the same number based on the uncertainty of concept predictions (UCP) leads to more than $16\%$ error reduction. To put it differently, RAND requires intervening on 43 concepts to reduce the error by half, while UCP needs only 12 to achieve the same reduction. In the SkinCon experiment, selecting concepts based on the expected change in target prediction (ECTP) leads the way among the criteria, although the scale of improvement over RAND is not as large. Note also that the strategy of first fixing the concepts with the largest loss (LCP) performs exceptionally well in all cases. This is, however, due to the help of ground-truth knowledge of the concepts, which is unavailable in practice. Nonetheless, we believe it can serve as an indicator to guide better intervention strategies, which we defer to future work.
+
+ # 4.2.1. REFLECTING COST OF INTERVENTION
+
+ As we discussed in Section 3.2.1, the cost of intervention may differ by concept selection criterion. Taking this aspect into account, we set up experiments to evaluate the intervention effectiveness in terms of the theoretical cost. Specifically, we model the relationships between $\tau_{i}$, $\tau_{g}$, and $\tau_{f}$ as $\tau_{i} = \alpha \tau_{g}$ and $\tau_{g} = \beta \tau_{f}$, which means that the cost of intervention (e.g., time to fix a concept) is $\alpha$-proportional to the cost of making inference on $g$, and likewise, $\tau_{g}$ is $\beta$-proportional to $\tau_{f}$. Then we can evaluate the cost-reflected intervention effectiveness with respect to an arbitrary unit ($v$), and from this, we can further show how it changes as we control $\alpha$ and $\beta$.
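+
+ The sketch below simply instantiates this cost model numerically; the ECTP expression is the $\mathcal{O}(\tau_g + (2k + 2)\tau_f + n\tau_i)$ cost quoted later from Table 2, while the RAND expression (one pass of $g$ plus one final pass of $f$) is our own assumption:
+
+ ```python
+ def costs(alpha, beta=100.0, tau_i=1.0, k=100, n=10):
+     # Cost model: tau_i = alpha * tau_g and tau_g = beta * tau_f.
+     tau_g = tau_i / alpha
+     tau_f = tau_g / beta
+     rand = tau_g + tau_f + n * tau_i                # assumed: no scoring passes
+     ectp = tau_g + (2 * k + 2) * tau_f + n * tau_i  # per Table 2
+     return rand, ectp
+
+ print(costs(alpha=0.01))  # ~ (111.0, 312.0): ECTP's 2k+2 passes of f dominate
+ print(costs(alpha=1.0))   # ~ (11.01, 13.02): intervention cost dominates instead
+ ```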
+
+ ![](images/991c80b160b98a54fffd02c1a3ababcb3751d2603915401102bec0a6c766bd27.jpg)
+ (a) CUB
+
+ ![](images/2fa123db3af76c4a86f156f6f744994fa31225ff7e585a495eee1e55a39386e2.jpg)
+ (b) SkinCon
+
+ ![](images/05249e6cb70fb2d704e57d30f5cb4422c73e2bac88d37d10371d0112a64f1512.jpg)
+ (c) Synthetic
+
+ ![](images/f77ce79eada1f20057511c1407d7df8fb3ab122948b7f3b50cb5bcda85700098.jpg)
+ Figure 3: Intervention effectiveness of concept selection criteria (task error vs. number of concepts corrected by intervention) measured at the I+S level. A more effective method would reduce the error more for the same number of concepts intervened.
+ (a) $\alpha = 0.01$
+ Figure 4: Effect of $\alpha$ on intervention (on Synthetic). We fix $\tau_{i} = 1, \beta = 100, k = 100$. ECTP, the intervention method strongly evaluated previously, becomes less effective as $\alpha$ decreases. Here, the kinked shapes are due to the relatively high initial cost of the first intervention before $n$ becomes large.
+
+ ![](images/9748c817a5ac077be2fe3eb1de56a2b9869af3f0accdb0e36c09866c3f81143d.jpg)
+ (b) $\alpha = 0.03$
+
+ ![](images/244e1a8ee7f583c7d549e7c4bab528afef1e2e16d5cd928768d47e56570aa920.jpg)
+ (c) $\alpha = 0.05$
+
+ ![](images/42beddabee7eec05d3edd9b1613aa98b19c6bceeba880e8e2989ab6bccec2691.jpg)
+ (d) $\alpha = 0.1$
+
+ ![](images/6b2ad4e649b81da57560b61ed2ca963b9d65bec05f527e02c4634688b92ff4ad.jpg)
+ (e) $\alpha = 1.0$
+
+ ![](images/e80927edf2f5ee92c1e495ad1b159f870947ccb16ca3ed1fc39fd533a366f97d.jpg)
+ (f) $\alpha = 100.0$
+
+ First, the result of changing $\alpha$ is plotted in Figure 4. As $\alpha$ becomes smaller, RAND becomes very effective compared to ECTP. This makes sense because with small $\alpha$, $\tau_{i}$ becomes relatively small and the other terms related to $\tau_{g}$ or $\tau_{f}$ dominate the cost of ECTP, which is $\mathcal{O}\big(\tau_g + (2k + 2)\tau_f + n\tau_i\big)$ as seen in Table 2. ECTP is thus penalized in terms of intervention effectiveness in the small-$\alpha$ regime. In contrast, when $\alpha$ becomes larger, $\tau_{i}$ dominates the cost of ECTP as $n$ increases, which in turn recovers the effectiveness of ECTP. The former can happen in extreme circumstances, for example, when using very large models (i.e., large $\tau_{g}$) or in places with a tight labor market (i.e., small $\tau_{i}$ in terms of monetized value). We remark, however, that this is a hypothetical case, and $\alpha$ will be much greater than 1 in realistic settings, as summoning a domain expert for intervention costs far more than a forward pass of a neural network.
+
+ We also experiment with changing $\beta$ to control the relative cost between $\tau_{g}$ and $\tau_{f}$. As a result, we find that when $\beta$ is small, ECTP can perform poorly while RAND can be effective, as RAND only requires a single forward pass of $f$ to make the final prediction. Furthermore, we extend this analysis to the CUB experiment with more realistic settings, where $\tau_{g}$ and $\tau_{f}$ are set based on the wall-clock times of running each model, and $\tau_{i}$ is set based on the actual concept annotation time provided in the dataset. All of these results are provided in Appendix C with detailed analysis, due to space constraints.
+
+ # 4.3. Analyzing Intervention Levels
+
+ As seen in Figure 5a, most criteria remain more effective than RAND in group-wise single $(\mathrm{G} + \mathrm{S})$ intervention. Specifically, RAND needs $39.3\%$ (11 out of 28) of the groups to be intervened on to decrease the task error by half, while UCP needs only $25.0\%$ (7 out of 28). However, CCTP does not outperform RAND this time. We find a similar pattern for the batch case $(\mathrm{G} + \mathrm{B})$ (see Figure 14 in Appendix D). We suspect that taking the mean of the scores loses some discriminative information for some selection criteria, and perhaps a different surrogate needs to be designed.
+
+ In addition, we find that group-wise intervention is in general less effective than its individual counterpart given the same intervention budget (see Figure 5b). Intuitively, correcting concepts within the same group may not provide information as rich as selecting the same number of concepts across different groups. Nonetheless, we remark that group-wise intervention can potentially be cost-effective when concepts within the same group are mutually exclusive, which depends on how the concepts were annotated during the creation of the dataset.
+
+ ![](images/9cd7ff894c874d55ada1e9927e66e8b0fe21b429e32c0bf9786ac9ddcd69cc7e.jpg)
+ (a) $\mathrm{G + S}$ level
+
+ ![](images/610c68a19b30450f10b834280a9d84a1c97f3c767a0e3f8a698d94b903740a63.jpg)
+ (b) $(\mathrm{I} + \mathrm{S})$ vs. $(\mathrm{G} + \mathrm{S})$
+ Figure 5: Comparing the effects of different intervention levels using the CUB dataset. Here, intervention counts denote the number of intervened groups for G and the average number of intervened concepts for B, respectively. We fix the selection criterion to UCP in (b) and (d); all other cases are provided in Appendix D.
+
+ ![](images/a498a01dc2babb1fb747357cfaf6bcdbef909b27000ab40f65d0aee0ae3e5a41.jpg)
+ (c) I+B level
+
+ ![](images/3764caef1f0035e3a3846159f937b7a2972420dea2ba55ac72ec26e9f3e8c6f4.jpg)
+ (d) $(\mathrm{I} + \mathrm{S})$ vs. $(\mathrm{I} + \mathrm{B})$
+
+ The proposed concept selection criteria also remain effective for batch intervention (B), as seen in Figure 5c. Interestingly, batch intervention turns out to be more effective than single (S) intervention as well, as seen in Figure 5d. This trend holds for the other criteria besides UCP, except for CCTP, and extends to group-wise batch $(\mathrm{G} + \mathrm{B})$ intervention (see Appendix D for full results).
+
+ # 4.4. Considering Training and Conceptualization
+
+ Effect of training scheme As seen in Figure 6a, intervention is in general most effective under the IND training scheme. We believe this is because $f$ is not trained with the ground-truth concept labels in the case of SEQ and JNT(+P), so fixing concept predictions under these schemes may not work as well. We also find that EUDTP becomes much less effective under SEQ or JNT than the other alternatives and actually underperforms RAND (see Appendix E). Hence, the effectiveness of a criterion can depend on which training strategy is used, implying the need for comprehensive evaluations of newly developed criteria.
+
+ For the SkinCon dataset, however, intervening on the concepts under the SEQ, JNT, and JNT+P strategies rather increases the average task error regardless of the concept selection criterion. Specifically, training under JNT already achieves low task error, and applying intervention does not help reduce it further (see Figure 6b). We hypothesize that this is due to some inherent characteristics of the dataset as well as the limited concepts provided in the bottleneck, which negatively influence making correct task predictions with binarized concepts. This can potentially correspond to the known issue of information leakage in CBMs (Mahinpei et al., 2021; Havasi et al., 2022).
+
+ Effect of conceptualization We find that HARD and SAMP may begin with high task error compared to SOFT, as expected. However, when making use of the developed concept selection criteria such as UCP, the gap between these conceptualization methods closes much faster with more intervention than under RAND, as seen in Figures 6c and 6d.
+
+ This result is consistent across different training strategies and datasets (see Appendix F).
+
+ # 5. Analyzing Intervention with Synthetic Data
+
+ We have observed that intervention can often yield different results across datasets. Precisely, intervening on all concepts decreases the task error down to $0\%$ on CUB, whereas the amount of decrease is much smaller on SkinCon and the average task error remains high, at around $29\%$. Also, the relative order of effectiveness between concept selection criteria can vary. We find it difficult to unravel these findings when experimenting only on real datasets, as in previous work (Koh et al., 2020; Chauhan et al., 2022; Sheth et al., 2022; Zarlenga et al., 2022). To provide an in-depth analysis, we develop a framework to generate synthetic datasets based on three different causal graphs that control the following factors: input noise, hidden concepts, and concept diversity.
+
+ # 5.1. Generating Synthetic Data
+
+ CASE 1: Noisy input Real-world data contains a lot of random noise coming from various sources (e.g., lighting). We construct a causal graph for this case in which Gaussian noise is added to the input data (see Figure 7a).
+
+ CASE 2: Hidden concept When a subset of concepts is unknown or hidden, the target prediction must be made with only the available concepts and thus becomes incomplete, as the underlying representations are not fully captured in the bottleneck layer. We design a causal graph for this case and generate synthetic data in which some concepts necessary for correct target predictions are hidden on purpose (see Figure 7b).
+
+ CASE 3: Diverse concept Examples within the same class can have different values for the same concept in realistic settings. For instance, simple concept-level noise or fine-grained sub-classes (e.g., 'black swan' and 'white swan' within a 'swan' class) can produce such diverse concept values. We construct a causal graph to generate data in which concept values can vary probabilistically and inputs are produced according to these concepts (see Figure 7c).
+
+ ![](images/e187fc99eb2f19732359c451f8446bb59ed10b478a2f084ddfddc5a79a0bcb3a.jpg)
+ (a) Training on CUB
+
+ ![](images/cedabfecb39443fb71f98ef6160101f5ac7e1317789366c7ad994cdda302cca4.jpg)
+ (b) Training on SkinCon
+
+ ![](images/180eb1c6953f7ffd7227497dd4bca3a480ea625de929ec17effdc92c4ff9c8fd.jpg)
+ (c) Conceptualization: RAND
+
+ ![](images/0fd230aef138c394758e570d0207dc458028be5bb34c215f4a7c89eb7224f71e.jpg)
+ (d) Conceptualization: UCP
+
+ ![](images/7b9918e690404b30724e61135d82998a3f83af191f53f007d88616e7eadebef7.jpg)
+ Figure 6: Comparing the effects of different training strategies (a, b) and conceptualization methods (c, d). We choose EUDTP as the concept criterion for (a, b) and SkinCon as the dataset for (c, d). We provide all other results in Appendices E and F.
+ (a) Noisy input
+
+ ![](images/835c7e1c941808c82deacec20cc59550cd4c2b128309f89615f0ff2e2495431d.jpg)
+ (b) Hidden concept
+
+ ![](images/2e2e0989c6fe72fc8e37afb407ae887c86f6c83a9aba384ea407023f2c680c0c.jpg)
+ (c) Diverse concept
+
+ ![](images/f9f973a280c94f8ece4cbd01110d3279b7301d7047bb80c8edc56d00f6369eec.jpg)
+ Figure 7: Causal graphs for generating synthetic datasets. $z$, $h$, and $d$ represent factors of input noise, hidden concepts, and concept diversity, respectively. The full details of the data generation process are provided in Appendix A.3.
+ (a) Noisy input
+
+ ![](images/2e86296930d81549f536bbf522a619091c8f39df9f048108a214be02b5795306.jpg)
+ (b) Hidden concept
+
+ ![](images/0a41a669678465f03a02cd7c376e1de46d1b5e46536f55733b3185d0b195e334.jpg)
+ (c) Diverse concept
+
+ ![](images/12b2b66d27c62361c1e577758a66261f8f4ac3c4f2eb1d9757cba56c7b6d74b9.jpg)
+ Figure 8: Effects of data on intervention with UCP. Each plot is with different values of the variance of the noise $(z)$, the ratio of hidden concepts $(h)$, and the probability of perturbing the concept values $(d)$, respectively.
+ (a) $\gamma = 1$
+ Figure 9: Intervention effectiveness with different sub-group sizes $\gamma$. The relative order of effectiveness between selection criteria changes significantly according to $\gamma$.
+
+ ![](images/ee046aacf361a66e2142bd425eb4b9503be3d7d748cf4835f840237885496187.jpg)
+ (b) $\gamma = 10$
+
+ # 5.2. Results
+
+ First, we display the effect of input noise in Figure 8a. The initial task error increases with the level of noise $(z)$ due to poor concept prediction performance. Specifically, we need 17 intervention counts to decrease the task error by half on extremely noisy data $(z = 2.0)$, while correcting only 2 concepts yields the same effect at a moderate noise level $(z = 0.5)$. In contrast, the initial task error is already near $0\%$ at an extremely small noise level $(z = 0.1)$, where we do not need intervention at all.
+
+ Next, we evaluate the effect of hidden concepts in Figure 8b. The final task error increases with more hidden concepts, and thus intervention becomes less effective. Specifically, the error remains high, around $13\%$, when half of the concepts are hidden ($h = 50\%$), while it reaches zero without hidden concepts ($h = 0\%$). This is because the target prediction cannot be made with complete information when hidden concepts exist, which is often the case when constructing CBMs in realistic settings.
+
+ We also find that generating more diverse concept values within the same class increases both the initial and final task errors, making intervention less effective (see Figure 8c). This is because learning discriminative representations for target prediction becomes much more difficult. To circumvent this issue, many previous works (Koh et al., 2020; Zarlenga et al., 2022; Havasi et al., 2022) preprocess the data so as to force concepts within the same class to have the same value. However, this may have an adverse effect on model fairness, as we discuss in Section 6.
+
+ Furthermore, we discover that different sub-group sizes can change the relative ordering of intervention effectiveness between concept selection criteria. Here, we define a sub-group as a set of classes with similar concept values and denote its size by $\gamma$. Interestingly, EUDTP becomes less effective with a small group size ($\gamma = 1$), even compared to RAND, whereas it becomes the most effective except for LCP when $\gamma = 10$, as seen in Figure 9. We believe this is because, when $\gamma$ is large, classes within the same sub-group are classified more easily by decreasing the uncertainty in target prediction using EUDTP.
+
+ ![](images/be92c758e927d457bcfad38f04ce7fb057e296a91393bd3da0733e8d94955cd0.jpg)
+ (a) RAND
+
+ ![](images/ab911e0dec44ada62641531ca6d1de43bc7bb76e680d7bb83b25ce68269f46c6.jpg)
+ (b) UCP
+
+ ![](images/e49740e04b84727ff7705120f5f7063ccaf54cda834af842b05c9cedfd1f2fa4.jpg)
+ (c) ECTP
+
+ ![](images/71ef9889f04068cc96cf9eef3f7f4bf1f15421ac46108f3e506d5325d68ac90a.jpg)
+ Figure 10: Effect of NVC on task error. Intervention is done on CUB images for which concept prediction is $100\%$ accurate, and yet NVC keeps increasing the task error. NVC O and NVC X correspond to the results with and without NVC, respectively.
+ (a) Advantage of MV
+
+ ![](images/9f8a3f30c78585622dc412a57873c97c242648c8b5069c1134fb94cc3ca4b808.jpg)
+ (b) Disadvantage of MV
+ Figure 11: Effects of majority voting (MV) on target prediction. MV O and MV X correspond to the results with and without MV, respectively. (a) While it helps decrease the task error under intervention, (b) it yields biased predictions against minorities.
+
+ The result indicates that the behavior of a criterion can vary significantly across datasets and again demonstrates the necessity of comprehensively evaluating newly developed criteria. We refer to Appendix G for results on the effect of other factors on intervention.
+
+ # 6. Pitfalls of Intervention Practices
+
+ So far, we have focused on analyzing the effectiveness of the intervention procedure in many aspects. In this section, we add another dimension, namely the reliability and fairness of current intervention practices, to help advance toward trustworthy and responsible machine learning models.
+
+ # 6.1. Nullifying Void Concepts Increases Task Error
+
+ Does intervention always help target prediction? Contrary to expectation, we find that the answer is no; in fact, intervention can even increase the task error. To verify this, we set up an ablation experiment on the CUB dataset where intervention is conducted only on cases for which all concepts are predicted correctly with zero error; ideally, intervention should have no effect in this case. The results are quite the opposite, as presented in Figure 10. The task error keeps increasing with more intervention, and the prediction error reaches more than seven times that with no intervention.
+
+ It turns out that nullifying void concepts (NVC), a common practice of handling unsure concepts by simply setting them to zero, is what leads to this catastrophic failure. For example, just because the wing of a bird is invisible does not necessarily mean that the concept 'wing color:black' should be zero-valued; the bird can fall in the class 'Black_Tern', whose wing color is actually black. We identify that this seemingly plausible tactic can in fact mistreat invalid concepts, and therefore applying NVC intervention to such invalid cases should be avoided (see the sketch below).
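+
+ The following sketch contrasts NVC with simply skipping invalid concepts; the function and the skip policy are our own illustration (the paper defines NVC only as zeroing unsure concepts):
+
+ ```python
+ def intervene(c_hat, c_true, visible, idx, nvc=True):
+     """Replace predicted concepts at positions `idx` with expert values.
+     visible[i] is False when concept i cannot be judged from the input."""
+     out = c_hat.copy()
+     for i in idx:
+         if visible[i]:
+             out[i] = c_true[i]
+         elif nvc:
+             out[i] = 0.0  # NVC: force unsure concepts to zero, which can
+                           # contradict the class (an occluded wing may be black)
+         # else: leave the model's prediction for invalid concepts untouched
+     return out
+ ```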
+
+ # 6.2. Majority Voting Neglects Minorities
+
+ Another common practice often taken by the community (Koh et al., 2020; Zarlenga et al., 2022; Havasi et al., 2022) is to coalesce concept values within the same class by forcing them to their majority votes (MV). As a preprocessing step, this tactic can dramatically improve task performance, as we demonstrate in Figure 11a. This is quite expected by now, given our Synthetic experiment results in Section 5.2 showing that high concept diversity can deteriorate target prediction performance.
+
+ However, it turns out that MV can have a negative impact on model fairness by ignoring minority samples. As a concrete example, consider the CUB dataset, in which the majority of images of the 'black tern' class have black underparts while some minority samples have white underparts. When MV is used in this case, we find that the underparts-color predictions for the minorities are mis-guided to be black, the majority-voted value, so as to yield the correct target prediction; if the minorities instead followed their own concept values from before MV, it would lead to incorrect target predictions (see Figure 11b). Intervention can even aggravate the situation, since it can decrease the task error for the minorities only when the predicted concept value is changed to the majority-voted value (black). In this sense, target predictions become biased toward the majority when MV is used.
+
+ This scenario can be problematic in the real world when the dataset contains sensitive concepts, e.g., gender or race. Consider the case where the target task is to predict the occupation of a person based on their appearance and 'race' is included in the concepts. If most 'physicians' are Caucasian and we apply MV, then an 'Asian physician' can be correctly classified only when he is predicted to be Caucasian; otherwise, the result would be an incorrect target prediction. While this might be somewhat exaggerated, we remark that this kind of situation can happen in practice. Besides, MV also forces us to misconduct intervention at test time with the majority votes, which is neither available in practice nor considered fair. We defer addressing the trade-off between performance and fairness to future work.
+
+ # 7. Discussion and Future Work
+
+ In this section, we discuss our key findings, their potential implications for the community, and possible future research directions.
+
+ In-depth analysis of intervention procedure We design and conduct a wide variety of new experiments from scratch to investigate the effectiveness of the current intervention procedure of CBMs. In a nutshell, our results reveal that not only the specific way of selecting which concepts to intervene on, but also how to intervene, on what data, and under which environment, matters to the degree of drastically changing results. Future work can extend our analysis to theoretically investigate intervention strategies in more detail.
+
+ Benchmark for evaluating concept selection methods Our evaluation protocol can serve as a way to evaluate any newly developed concept selection method for its effectiveness. We also provide a framework to generate synthetic data, based on which the effectiveness of proposed methods can be tested under various circumstances.
+
+ Analyzing the cost of intervention The effectiveness of concept selection criteria can change when reflecting the cost of intervention (see Section 4.2.1). Specifically, we find that a strongly evaluated criterion can become less effective in hypothetical cases depending on the size of the models or the status of the labor market. This indicates that the choice of concept selection criterion should reflect the available budget and environment at test time, especially in extreme environments.
+
+ Identifying the effect of data on intervention The effectiveness of the intervention procedure can vary quite significantly depending on some unknown characteristics of real-world datasets (see Section 5). For example, intervention becomes less effective on datasets containing more hidden concepts or more diverse concept values within the same class. Practitioners should take this aspect into account when developing and deploying CBMs, since intervention may not work as effectively as expected.
+
+ Reliability and fairness of intervention While the current trend is mostly focused on developing new intervention methods, we discovered somewhat unexpected and previously unknown issues that can be critical for ensuring the reliability and fairness of the intervention procedure (see Section 6). To be more specific, intervention can sometimes increase the task error, contrary to expectation, and can negatively impact model fairness by biasing predictions toward the majority. We call for future work to address these problems before blindly adopting CBMs in practice.
+
+ Extension of our work to other settings We remark that we have focused only on classification tasks, considering the characteristics of the real-world datasets used in the literature (Koh et al., 2020; Zarlenga et al., 2022; Havasi et al., 2022)<sup>1</sup>. Extending the intervention strategies to regression problems with real-valued concepts or targets is a promising avenue for future work. Analyzing intervention under more diverse settings could also be interesting, such as introducing architectural variations with hard autoregressive models (Havasi et al., 2022) or concept embedding models (Zarlenga et al., 2022).
+
+ # 8. Conclusion
+
+ The intervention procedure of CBMs has gone unattended in previous work despite its critical impact on practitioners. In this work, we study a wide range of aspects of the procedure and provide an in-depth analysis for the first time in the literature. Specifically, we develop various concept selection criteria that can be used for intervention and demonstrate that their behaviors can vary quite significantly based on an array of factors, including intervention levels, cost, training, conceptualization, and data characteristics. We also find several pitfalls in current practices that need careful addressing before CBMs are deployed in realistic settings. We plan to investigate developing more effective and reliable intervention strategies in future work.
+
+ # Acknowledgement
+
+ This work was partly supported by Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No.2019-0-01906, Artificial Intelligence Graduate School Program (POSTECH) and No.2022-0-00959, (part2) Few-Shot learning of Causal Inference in Vision and Language for Decision Making) and National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (2022R1C1C1013366, 2022R1F1A1064569, RS-2023-00210466).
+
+ # References
+
+ Alvarez Melis, D. and Jaakkola, T. Towards robust interpretability with self-explaining neural networks. NeurIPS, 2018.
+ Bahadori, M. T. and Heckerman, D. E. Debiasing concept-based explanations with causal analysis. ICLR, 2021.
+ Chauhan, K., Tiwari, R., Freyberg, J., Shenoy, P., and Dvijotham, K. Interactive concept bottleneck models. AAAI, 2022.
+ Daneshjou, R., Vodrahalli, K., Novoa, R. A., Jenkins, M., Liang, W., Rotemberg, V., Ko, J., Swetter, S. M., Bailey, E. E., Gevaert, O., et al. Disparities in dermatology AI performance on a diverse, curated clinical image set. Science Advances, 2022a.
+ Daneshjou, R., Yuksekgonul, M., Cai, Z. R., Novoa, R. A., and Zou, J. SkinCon: A skin disease dataset densely annotated by domain experts for fine-grained debugging and analysis. NeurIPS Datasets and Benchmarks Track, 2022b.
+ Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. ImageNet: A large-scale hierarchical image database. CVPR, 2009.
+ Esteva, A., Kuprel, B., Novoa, R. A., Ko, J., Swetter, S. M., Blau, H. M., and Thrun, S. Dermatologist-level classification of skin cancer with deep neural networks. Nature, 2017.
+ Ghorbani, A., Wexler, J., Zou, J. Y., and Kim, B. Towards automatic concept-based explanations. NeurIPS, 2019.
+ Groh, M., Harris, C., Soenksen, L., Lau, F., Han, R., Kim, A., Koochek, A., and Badri, O. Evaluating deep neural networks trained on clinical images in dermatology with the Fitzpatrick 17k dataset. CVPR, 2021.
+ Guo, Y. and Greiner, R. Optimistic active-learning using mutual information. IJCAI, 2007.
+ Havasi, M., Parbhoo, S., and Doshi-Velez, F. Addressing leakage in concept bottleneck models. NeurIPS, 2022.
+ Jordan, M. I. and Mitchell, T. M. Machine learning: Trends, perspectives, and prospects. Science, 2015.
+ Kim, B., Wattenberg, M., Gilmer, J., Cai, C., Wexler, J., Viegas, F., et al. Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (TCAV). ICML, 2018.
+ Koh, P. W., Nguyen, T., Tang, Y. S., Mussmann, S., Pierson, E., Kim, B., and Liang, P. Concept bottleneck models. ICML, 2020.
+ LeCun, Y., Bengio, Y., and Hinton, G. Deep learning. Nature, 2015.
+ Lewis, D. D. A sequential algorithm for training text classifiers: Corrigendum and additional data. ACM SIGIR Forum, 1995.
+ Lewis, D. D. and Catlett, J. Heterogeneous uncertainty sampling for supervised learning. Machine Learning Proceedings, 1994.
+ Mahinpei, A., Clark, J., Lage, I., Doshi-Velez, F., and Pan, W. Promises and pitfalls of black-box concept learning models. Workshop on XAI, ICML, 2021.
+ Marconato, E., Passerini, A., and Teso, S. GlanceNets: Interpretable, leak-proof concept-based models. NeurIPS, 2022.
+ Margeloiu, A., Ashman, M., Bhatt, U., Chen, Y., Jamnik, M., and Weller, A. Do concept bottleneck models learn as intended? Workshop on Responsible AI, ICLR, 2021.
+ Nevitt, M., Felson, D., and Lester, G. The osteoarthritis initiative. Protocol for the cohort study, 2006.
+ Sawada, Y. and Nakamura, K. Concept bottleneck model with additional unsupervised concepts. IEEE Access, 2022.
+ Selvaraju, R. R., Cogswell, M., Das, A., Vedantam, R., Parikh, D., and Batra, D. Grad-CAM: Visual explanations from deep networks via gradient-based localization. CVPR, 2017.
+ Settles, B., Craven, M., and Ray, S. Multiple-instance active learning. NeurIPS, 2007.
+ Sheth, I., Rahman, A. A., Sevyeri, L. R., Havaei, M., and Kahou, S. E. Learning from uncertain concepts via test time interventions. Workshop on Trustworthy and Socially Responsible Machine Learning, NeurIPS, 2022.
+ Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., and Wojna, Z. Rethinking the inception architecture for computer vision. CVPR, 2016.
+ Wah, C., Branson, S., Welinder, P., Perona, P., and Belongie, S. The Caltech-UCSD Birds-200-2011 dataset. 2011.
+ Yuksekgonul, M., Wang, M., and Zou, J. Post-hoc concept bottleneck models. ICLR, 2023.
+ Zarlenga, M. E., Barbiero, P., Ciravegna, G., Marra, G., Giannini, F., Diligenti, M., Shams, Z., Precioso, F., Melacci, S., Weller, A., et al. Concept embedding models. NeurIPS, 2022.
+ Zhou, B., Khosla, A., Lapedriza, A., Oliva, A., and Torralba, A. Learning deep features for discriminative localization. CVPR, 2016.
+
+ # A. Datasets
+
+ # A.1. CUB
+
+ CUB (Wah et al., 2011) is the standard dataset used to study CBMs in previous works (Koh et al., 2020; Zarlenga et al., 2022; Havasi et al., 2022; Sawada & Nakamura, 2022). There are 5994 training and 5794 test examples in total, each consisting of a triplet (image $x$, concepts $c$, label $y$) of a bird species. All concepts have binary values; for example, 'wing color:black' for a given bird image can be either 1 (true) or 0 (false). Following previous works (Koh et al., 2020; Sawada & Nakamura, 2022; Zarlenga et al., 2022), we perform so-called majority voting as preprocessing so that images of the same class always have the same concept values; for example, if more than half of the crow images have value true for the concept 'wing color:black', then this process converts the concept labels of all images belonging to the crow class to true (see the sketch below). Since the original concept labels are quite noisy, this procedure helps increase overall performance. However, it can potentially harm model fairness in some cases, as we address in Section 6.2. We also remove concepts that are too sparse (i.e., present in fewer than 10 classes), which leaves 112 of the 312 concepts. It is suggested in Koh et al. (2020) that including these sparse concepts in the concept layer makes it hard to predict their values, as positive training examples are too scarce.
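+
+ A minimal sketch of this majority-voting preprocessing (illustrative NumPy, not the released code):
+
+ ```python
+ import numpy as np
+
+ def majority_vote(concepts, labels):
+     """concepts: (N, k) binary matrix; labels: (N,) class ids.
+     Replace each class's rows by the per-class majority value
+     of every concept."""
+     out = concepts.copy()
+     for y in np.unique(labels):
+         rows = labels == y
+         majority = (concepts[rows].mean(axis=0) >= 0.5).astype(concepts.dtype)
+         out[rows] = majority  # one (k,) vector broadcast over the class rows
+     return out
+ ```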
+
+ # A.2. SkinCon
+
+ SkinCon (Daneshjou et al., 2022b) is a medical dataset that can be used to build interpretable machine learning models. It provides densely annotated concepts for 3230 images from the Fitzpatrick 17k skin disease dataset (Groh et al., 2021), making a triplet (image $x$, concepts $c$, disease label $y$) of a skin lesion for each example. Since training and test sets are not specified in SkinCon, we randomly split the dataset into $70\%$ training, $15\%$ validation, and $15\%$ test sets. The dataset provides various levels of class labels, ranging from individual disease labels with 114 classes to binary labels representing whether the skin lesion is benign or malignant. Following the experiments with Post-hoc CBM (Yuksekgonul et al., 2023) introduced in Daneshjou et al. (2022b), we use the binary labels for the target task and only the 22 concepts present in at least 50 images. Since the binary class labels are highly imbalanced ($87\%$ vs. $13\%$), we train the target predictor $f$ with a weighted loss and use the average per-class error as the metric instead of overall error for a fair comparison (see the sketch below).
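+
+ A small sketch of the balanced metric under our reading (the mean of per-class error rates; illustrative code):
+
+ ```python
+ import numpy as np
+
+ def average_per_class_error(y_true, y_pred):
+     """Mean of per-class error rates, robust to class imbalance: a
+     degenerate always-predict-majority classifier scores ~50% here
+     instead of a misleading ~13% overall error."""
+     errors = [np.mean(y_pred[y_true == y] != y) for y in np.unique(y_true)]
+     return float(np.mean(errors))
+ ```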
+
+ # A.3. Synthetic dataset
+
+ Algorithm 1 Generating synthetic data
+ 1: Sample $p_i \sim \mathcal{N}(\mu_\alpha, \sigma_\alpha)$ for $i = \{1, 2, \dots, k\}$
+ 2: for group $\ell = 0, 1, \dots, k / \gamma - 1$ do
+ 3: Sample $\zeta_i \sim \mathcal{U}_{[0,1]}$ and set $\ell_i = \mathbb{1}[\zeta_i \geq p_i]$ for $i = \{1, 2, \dots, k\}$
+ 4: for $y = 1, \dots, \gamma$ do
+ 5: Sample $i_y \in \{1, 2, \dots, k\}$ uniformly at random without replacement
+ 6: Set $c_i^j = \neg \ell_i$ if $i = i_y$ and $c_i^j = \ell_i$ otherwise (class index $j = \gamma \ell + y$)
+ 7: end for
+ 8: end for
+ 9: Generate $W_x \in \mathbb{R}^{k \times k}$ with each element distributed according to the normal distribution $\mathcal{N}(0, \sigma_w)$
+ 10: for class $j = 1, \dots, k$ do
+ 11: Generate $\nu$ samples for class $j$ as $x = W_x \cdot c^j + z$ where $z \sim \mathcal{N}(0, \sigma_z)$
+ 12: end for
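+
+ A minimal NumPy sketch of Algorithm 1 follows (an illustrative re-implementation with 0-indexed classes, not the released code):
+
+ ```python
+ import numpy as np
+
+ def generate(k=100, gamma=2, nu=100, mu_a=0.8, sigma_a=0.1,
+              sigma_w=0.1, sigma_z=0.8, seed=0):
+     rng = np.random.default_rng(seed)
+     p = rng.normal(mu_a, sigma_a, size=k)        # line 1: p_i = P(c_i = 0)
+     C = np.empty((k, k), dtype=int)              # one concept vector per class
+     for l in range(k // gamma):                  # lines 2-8
+         base = (rng.random(k) >= p).astype(int)  # shared sub-group pattern
+         flips = rng.choice(k, size=gamma, replace=False)  # line 5
+         for y in range(gamma):
+             j = gamma * l + y
+             C[j] = base
+             C[j, flips[y]] = 1 - base[flips[y]]  # line 6: flip one concept
+     W = rng.normal(0, sigma_w, size=(k, k))      # line 9
+     X, Y = [], []
+     for j in range(k):                           # lines 10-12
+         z = rng.normal(0, sigma_z, size=(nu, k))
+         X.append(C[j] @ W.T + z)                 # x = W_x c^j + z
+         Y.append(np.full(nu, j))
+     return np.concatenate(X), np.concatenate(Y), C
+ ```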
+
+ We generate the synthetic data following Algorithm 1 to test the effect of dataset characteristics on intervention. Here, we first assume that all examples within the same class share the same concept values and denote the $i$-th concept value of the $j$-th class as $c_i^j$. We also assume, for simplicity, that the dimensionality of the inputs and the number of target classes both equal the number of concepts $k$, following Bahadori & Heckerman (2021). In line 1, $\mu_{\alpha}$ and $p_i = P(c_i = 0)$ represent the overall sparsity level of the concepts (the proportion of concepts with value 0) and the probability of the $i$-th concept taking value 0, respectively. We set $\mu_{\alpha}$ to 0.8, considering that $80\%$ of the concepts have value 0 in the CUB dataset. We then divide the classes into $k / \gamma$ sub-groups of size $\gamma$ so that classes within the same group have similar concept values. Note that the classes within each sub-group differ by only two concept values, as seen in line 6. We set $\gamma = 2, k = 100, \nu = 100, \sigma_{\alpha} = 0.1, \sigma_{w} = 0.1, \sigma_{z} = 0.8$ unless stated otherwise. We randomly divide the generated examples into $70\%$ training, $15\%$ validation, and $15\%$ test sets.
+
+ To generate data with hidden concepts, we randomly pick $h\%$ of the concepts and remove them from the concept layer of the CBM. For training the models and the intervention experiments, we consider only the remaining concepts. In addition, a new dataset with diverse concepts can easily be produced by introducing a single variable $d$ and reversing the value of each concept of the previously generated dataset with probability $d$. In other words, $d$ is a factor that gives variations to concept-target pairs, as can exist in real-world datasets, and it differs from the role of $z$, which controls the noise level of the input (both knobs are sketched below).
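+
+ Continuing the illustrative generator above, the two knobs could be applied as follows (again a sketch under our own naming, not the released code):
+
+ ```python
+ import numpy as np
+
+ def hide_concepts(C, h, rng):
+     # Keep a random (100 - h)% of the concept columns in the bottleneck.
+     k = C.shape[1]
+     keep = rng.choice(k, size=int(k * (1 - h / 100)), replace=False)
+     return C[:, np.sort(keep)]
+
+ def diversify(concepts, d, rng):
+     # Reverse each per-example concept value independently with probability d.
+     flips = rng.random(concepts.shape) < d
+     return np.where(flips, 1 - concepts, concepts)
+ ```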
+
+ # B. Architectures and Training
+
+ CUB For the CUB dataset, we use Inception-v3 (Szegedy et al., 2016) pretrained on ImageNet (Deng et al., 2009) for the concept predictor $g$ and a 1-layer MLP for the target predictor $f$, following the standard setup as in Koh et al. (2020). Here, both $g$ and $f$ are trained with the same training hyperparameters as in Koh et al. (2020). We used $\lambda = 0.01$ for JNT and JNT+P, with the values taken directly from Koh et al. (2020). For the experiments without majority voting (Figure 30 in Appendix H), we use Inception-v3 pretrained on ImageNet for $g$ and a 2-layer MLP with a hidden dimensionality of 200 for $f$ so that it can describe more complex functions. We searched for the best hyperparameters for both $g$ and $f$ over the same sets of values as in Koh et al. (2020). Specifically, we tried initial learning rates of [0.01, 0.001], either a constant learning rate or decaying it by 0.1 every [10, 15, 20] epochs, and weight decay of [0.0004, 0.00004]. After finding the hyperparameter values with the best validation accuracy, we trained the networks with the same values again over 5 different random seeds on both the training and validation sets.
+
+ SkinCon For the SkinCon dataset, we fine-tune DeepDerm (Daneshjou et al., 2022a), an Inception-v3 network trained on the data of Esteva et al. (2017), for the concept predictor $g$, and train a 1-layer MLP for the target predictor $f$. We select the hyperparameters that achieve the best performance on the validation set (in terms of overall accuracy for $g$ and average per-class accuracy for $f$). Specifically, we tried initial learning rates of [0.0005, 0.001, 0.005], and either a constant learning rate or decaying it by 0.1 every 50 epochs. Here, we did not use weight decay. For the JNT and JNT+P training strategies, we tried concept loss weights $\lambda$ of [0.01, 0.1, 1.0, 5.0], but all of these values failed to decrease the task error under intervention. As for the CUB dataset, we trained the networks with the best hyperparameters over 5 different random seeds on both the training and validation sets.
+
+ Synthetic For the synthetic datasets, we use a 3-layer MLP with hidden layer sizes $\{100,100\}$ for $g$ and a single linear layer for $f$, similar to Zarlenga et al. (2022). For all experiments, we tried constant learning rates of [0.01, 0.1, 1.0] without learning rate decay or weight decay and trained the networks with the best hyperparameters over 5 different random seeds on the training sets. We used $\lambda = 0.1$ for JNT and JNT+P, determined by grid search over [0.01, 0.1, 1.0].
+
+ # C. More on Reflecting Cost of Intervention
+
+ As $\beta$ becomes smaller, RAND becomes more effective compared to ECTP (see Figure 12). This is because with small $\beta$, $\tau_{g}$ becomes marginal in the cost of ECTP, which is $\mathcal{O}(\tau_g + (2k + 2)\tau_f + n\tau_i)$; the intervention effectiveness of ECTP is therefore penalized with increasing $k$ compared to RAND, which only requires a single forward pass of $f$.
+
+ In addition, we experiment with more realistic settings for CUB, where we set $\tau_{i}$ as the concept annotation time (in seconds) provided in the dataset and $\tau_{g},\tau_{f}$ as the wall-clock inference times. Specifically, we set $\tau_{i}\approx 0.7$ by dividing the annotation time by the number of concepts within the group and taking the average. In addition, $\tau_{g}\approx 18.7\times 10^{-3}$ and $\tau_{f}\approx 0.03\times 10^{-3}$ were acquired by measuring the inference times on an RTX 3090 GPU and averaging over 300 repetitions. In this setting, $\tau_{i}$ dominates the others, i.e., $\alpha$ is large, and the relative effectiveness between the criteria remains the same, as seen in Figure 13. Nonetheless, we remark that the result can change with different model sizes or GPU environments in extreme cases. We also considered a more detailed case where we do not take the average of the $\tau_{i}$'s (concept annotation times) all at once but rather per intervention step, reflecting the differing intervention costs of different concepts. The relative rankings between RAND and ECTP do not change, but interestingly, we found that ECTP first selects the concepts that require larger intervention costs (i.e., more concept annotation time).
+
+ ![](images/acaa0fc90af8d9e2d4af320831715f1e4801457c0283c281ca1a15dcca8313cc.jpg)
+ (a) $\beta = 1$
+
+ ![](images/38f215b6ca06bf74fecfb09674c1e265113de0cb40c6badff45042618c9cb6e.jpg)
+ (b) $\beta = 3$
+
+ ![](images/e59c935d89f2e8c812580037368e904c4bd9b8e9b2164107abc5f66a41611c80.jpg)
+ (c) $\beta = 5$
+
+ ![](images/158094a11f3d06b68f802e1196b3be95606cb9a36371d1a6896755b6ff1b7c6c.jpg)
+ (d) $\beta = 10$
+
+ ![](images/16228f1ae5cc2b92b6357af8c315291240c4a6dc6bf3a5331979780d60ad0dba.jpg)
+ (e) $\beta = 100$
+
+ ![](images/35212f9430c0a981277cba8d09f6b9147ae11efa227797ef80c12f021fe58812.jpg)
+ Figure 12: Effect of $\beta$ on intervention. We fix $\tau_{i} = 1, \alpha = 1, k = 100$. ECTP, the concept selection criterion strongly evaluated previously, becomes less effective as $\beta$ decreases.
+ (a) $\tau_{i}$ set as the average among all concepts
+
+ ![](images/c77f9a275597c32ada3b07a08cd19e9e548e8a8363ad4b22c56c59a496d69562.jpg)
+ (b) $\tau_{i}$ set as the average per intervention step
+ Figure 13: Comparison between concept selection criteria in terms of the intervention cost for the CUB. Here, cost represents the concept annotation time and the inference times of $g$ and $f$, in seconds.
+
+ # D. More Results on the Effect of Intervention Levels on Intervention
+
+ ![](images/53abcdad1bdbded2c19d7a3d0a6c59d33e5d11b8c1faed796fb61c7d80e9aea7.jpg)
+ (a) $\mathrm{I} + \mathrm{S}$
+
+ ![](images/006cceaeb25f0e428175fe6d8b14f429b4e429517e167a40a61e87c6550fccf9.jpg)
+ (b) $\mathrm{G} + \mathrm{S}$
+
+ ![](images/8e60679c752fb74608bc6c6f4563c7bfb6fa49c75bc7578d6169eaaff3a67c21.jpg)
+ (c) $\mathrm{I} + \mathrm{B}$
+
+ ![](images/912d032e82b3f946cd0dbdef4c805d009d6e814f0eadd1dbc57552a5cfdcc817.jpg)
+ (d) $\mathrm{G} + \mathrm{B}$
+
+ ![](images/78d1a514607d161a12d32bb1108c2fc40205b8d878a0d153fbdd8727b401b84d.jpg)
+ Figure 14: Comparison between intervention criteria under different levels for the CUB.
+ (a) RAND
+
+ ![](images/e796ae91b795f1f67e2caa9d7927ba319257c5a00a299f539e80b9818fc0890a.jpg)
+ (b) UCP
+ Figure 15: Comparison between I+S vs. G+S for the CUB.
+
+ ![](images/de99cd0108eb9d5ed48447b7feaa9b93203a3b885f8a0c1278aa159109d855a3.jpg)
+ (c) LCP
+
+ ![](images/eaedc4ba07aa64885f39fa5afbb310077f650d163a27a5022e85275feec9a10b.jpg)
+ (d) CCTP
+
+ ![](images/47dd53fc79a8ff45c37d96062f9900cc83237aa8bd0e9b72148b1a8ce43fdbe8.jpg)
+ (e) ECTP
+
+ ![](images/181e9021678a96f818686b695a0143a9cc10f8877b3b2360908f8f729815719f.jpg)
+ (f) EUDTP
+
+ The comparison between I+S and G+S using different concept selection criteria is presented in Figure 15. Individual intervention is in general more effective than group-wise intervention, except for the RAND criterion. We find similar results for the comparison between I+B and G+B (see Figure 16). We also note that CCTP becomes less effective at the G levels, as seen in Figure 14.
+
+ ![](images/181f2a9d2bd0f3a583ed921b7cc8dca6914b36f46fa8c5b2d0ae91ac1977bd0b.jpg)
+ (a) RAND
+
+ ![](images/a0e6a12e6e1859a2fec4e757dfb4ffbe18dd9de784ed6c55a1ce6a5b5e27be70.jpg)
+ (b) UCP
+
+ ![](images/56317d05db35ea67dd1f392619335231953a612f9e457238fa8a114fbb4c610a.jpg)
+ (c) LCP
+
+ ![](images/491b5c2f10cdc468b0ab2a9e370edc0a189652b243cdf0fdd10e742a3f7e7c4b.jpg)
+ (d) CCTP
+
+ ![](images/b8571dc164e2e68de4b24edf42b044a98d14806daf2c6ebc86691bf81a9ea186.jpg)
+ (e) ECTP
+
+ ![](images/25c92da71397ca8492882b32794058c0dd4b16a032912d6fcf0976de346c528f.jpg)
+ (f) EUDTP
+
+ ![](images/67f74821a73eeaf83c260313ef2bd054b183138be1a0674ec3530bad0509bda5.jpg)
+ (a) RAND
+
+ ![](images/7238dd903d6ad01a4957c4167c12d0cb9cd8f92aea08dea13b393dc723d73b5e.jpg)
+ (b) UCP
+
+ ![](images/2d3f905d54efb841e7fc449cd2cfa9f7d66140061456840dad7be18d18bcd60a.jpg)
+ Figure 16: Comparison between I+B vs. G+B for the CUB.
+ (c) LCP
+
+ ![](images/4d6ea4c9c38562fde0b325d601c8a21cef17fb3faf269dad7cc3fd1206284c9d.jpg)
+ (d) CCTP
+
+ ![](images/35ab8e65cbe1313e23025f3e8c7b675769a1a144f7e73bc1eaafd208ccfd4a9e.jpg)
+ (e) ECTP
+
+ ![](images/45913e4077a717020af758870ac9e57cb25344fe78d16df3779d16323299ea5c.jpg)
+ (f) EUDTP
+
+ ![](images/489e62ed38526af8e710a7318422551927e1241b7d0a9e1e0fb409d5619b77f4.jpg)
+ (a) RAND
+ Figure 18: Comparison between G+S vs. G+B for the CUB. For G+B, each point is plotted when the average number of intervened concepts per image first exceeds each integer value.
+
+ ![](images/4964518a859949f744c9ecd1a9f047ef7d0794b4e947a21a778e447791a31139.jpg)
+ Figure 17: Comparison between I+S vs. I+B for the CUB.
+ (b) UCP
+
+ ![](images/b9be29f2f82db70bb7619d1ec2b381ff77912f44ca22884450fd6677b9357d93.jpg)
+ (c) LCP
+
+ ![](images/11eae1f4d5b2d118345daf695a82ebf50b9a882c57e1308abe878e3c38fc4705.jpg)
+ (d) CCTP
+
+ ![](images/7acbe3a48a2ca744e1ef96fa30ddfdb79f5c9e7c1faf4ee4134aaae2e1cf89b3.jpg)
+ (e) ECTP
+
+ ![](images/3b237b105a172efd66d94b0da0ecc3b27bf505188da76ec7b03d90458c2db417.jpg)
+ (f) EUDTP
+
+
+ Batch intervention is either more effective than or at least as competitive as single intervention across different concept selection criteria, as seen in Figure 17. In Figure 18, we observe that $\mathrm{G + B}$ is also more effective than $\mathrm{G + S}$. CCTP does not show much difference between S and B. This is because the target predictor $f$ is a simple linear layer in our experiments, and thus $\frac{\partial f_j}{\partial\hat{c}_i} = w_{ij}$ is fixed for all examples, where $w_{ij}$ is the weight from the $i$-th concept to the $j$-th class in $f$.
+
+ # E. More Results on the Effect of Training Strategies on Intervention
+
+ ![](images/0a6f3fb3f7ae3743172e918d7e6c8ef72627c73ae0dc1c7e1773609f0f6d9bc8.jpg)
+ (a) IND
+ Figure 19: Comparison between concept selection criteria using different training strategies for the CUB. For JNT and JNT+P, we present the results with $\lambda = 0.01$.
+
+ ![](images/e1f116cfd5533f1123de20f7dfec2d989cac3538fb0eb0ef519deb35554c28ab.jpg)
+ (b) SEQ
+
+ ![](images/fb8e3f90fb83fea48431e30ff4a44851c5bdb2971f0aeb10f92e8ead1955d224.jpg)
+ (c) JNT
+
+ ![](images/9ce240a4b842cc1aade3dcc39328217f0cc4b9202da0d7fd8f895ff748779673.jpg)
+ (d) JNT + P
+
+ The results for the CUB dataset are presented in Figure 19. Note that EUDTP becomes even less effective than RAND under SEQ and JNT. For the synthetic datasets, EUDTP also becomes much less effective, as in the CUB dataset (see Figure 21). Note that when using the JNT or JNT+P training schemes, LCP may not be the best choice: the target predictor $f$ is not trained with the ground-truth concept values, so rectifying the concept with the highest prediction loss does not always guarantee a decrease in the task error. Comparisons between different training strategies for a fixed concept selection criterion on CUB and Synthetic are presented in Figures 20 and 22.
+
+ ![](images/afff0b0194ae5d8bd2ecacac7afa379ca2fdc251a22c69bf009b2fcaa5549d57.jpg)
+ (a) RAND
+
+ ![](images/568e3b0fedf70565b148f950df0e671fb411ef7817c7fbeff67bc180f3b1e875.jpg)
+ (b) UCP
+
+ ![](images/8e14a4b6c92e8963d8f44146a54ce2bb5ba8b39befb34e76f7d0caee03b2ddf1.jpg)
+ (c) LCP
+
+ ![](images/0c0e6db3becc5f4c12e7b857274c2f7119a8d627bb4386e4d3188d9a41fdfee8.jpg)
+ (d) CCTP
+
+ ![](images/1ad00de543abe49d1b7536fd974f2ecaae078aa355dcd8dfb75bdfaf5fabeb4e.jpg)
+ (e) ECTP
+
+ ![](images/708a56f6a5e7b254019865e2aef8220db8ea45df4535f2554fb44103e84a48f3.jpg)
+ (f) EUDTP
+
+ ![](images/c1a955cdadb5245c803c80d88a4332c6a8664d623a5b31cb5858d40766a7e558.jpg)
+ Figure 20: Comparison between different training strategies for a fixed concept selection criterion for the CUB.
+ (a) IND
+
+ ![](images/94e0e85f8f84775bbdb2e6e9d70c9c97f0bfe0a7dfa4137ac2a02847c9976b3b.jpg)
+ (b) SEQ
+
+ ![](images/9a9390cccdb4c18b0f43d414717c87b871517c903e51591104a31e20e8f050e2.jpg)
+ (c) JNT
+
+ ![](images/46722caa064e35b97cc618836b99f0441506bca8309ecd19fb34c839d276a793.jpg)
+ (d) JNT + P
+
+ ![](images/baebe8aeffb2c2d9f150e95904f0f639e49ab0452a0062a38dd62cd153662e6d.jpg)
+ Figure 21: Comparison between concept selection criteria using different training strategies for the Synthetic. For JNT and JNT+P, we present the results with $\lambda = 0.1$.
+ (a) RAND
+ Figure 22: Comparison between different training strategies for a fixed concept selection criterion for the Synthetic.
+
+ ![](images/2e640a3700c0c8bd805b8620c89c338d12e260134e6ba80e215cf32ecba44bd3.jpg)
+ (b) UCP
+
+ ![](images/3fa9267cef5a3709cd788200905be2b6da142c209101f77eded43a65073e8287.jpg)
+ (c) LCP
+
+ ![](images/ae7be9355e2ed13cf54a875b82185cdbde91dae0ad1ca33556f10290e82e346d.jpg)
+ (d) CCTP
+
+ ![](images/c8dbb4e30743bf2c9483f48380118fc2fddf163ce631c9ffb91b66e9fe08bab5.jpg)
+ (e) ECTP
+
+ ![](images/98ca3b071417655ef05646b3e7065b2cac8c92496f1f5cc2ea1bbccddab81698.jpg)
+ (f) EUDTP
+
+ # F. More Results on the Effect of Conceptualization Methods on Intervention
+
+ ![](images/095c3644c9dcef0a54e25c122b86b072be92be054e7a050999c3f4c76d0680bd.jpg)
+ (a) RAND
+ Figure 23: Intervention results under different conceptualization methods using various concept selection criteria. Here, we used IND training strategy for the CUB.
+
+ ![](images/d83f293434f77f38f4cfaaea77cc9969eea478f62222688b0bfba2977cd343d4.jpg)
+ (b) UCP
+
+ ![](images/417320f299aa26e8aa06be3045983d06c8fdd3c1770c0065099c5c3a1ecb17e4.jpg)
+ (c) LCP
+
+ ![](images/9f1c9e32c3e725216217e5d437329db13298326d6b4ffa13f3d6dfdf2b0ea423.jpg)
+ (d) CCTP
+
+ ![](images/2f681d90b7744e838e32ca1f6c879181c1e837ccd00544aa757ec920c5c9e07e.jpg)
+ (e) ECTP
+
+ ![](images/ae908223ff74f3d598e9abf26dbf54c411285985dd35e0d01681ca9372f19824.jpg)
+ (f) EUDTP
+
+ Across all the datasets and concept selection criteria, utilizing effective criteria closes the gap between different conceptualization strategies much faster than the RAND criterion, as seen in Figures 23 to 27.
+
620
+ ![](images/4452223408a7db9001a4fbae532efe7b76e511a73ff875c89e11d3dbea58b649.jpg)
621
+ (a) RAND
622
+
623
+ ![](images/bfadd3374b735fbdbb884a79b1059899d04a439a75efe4ae967cbe41bea3219e.jpg)
624
+ (b) UCP
625
+
626
+ ![](images/c6d61d265fd74e88d6f366db53c1bb8cf33fb6ee2163094c0506ec0820eec2b7.jpg)
627
+ (c) LCP
628
+
629
+ ![](images/f921794b955f51e25782aed57f0b713a2430deeebbd1e8553832875c48a2282c.jpg)
630
+ (d) CCTP
631
+
632
+ ![](images/53aa8ee4e292873175f0a4bc9845aa673ad4033cfe603eb1ed52476444123e27.jpg)
633
+ (e) ECTP
634
+
635
+ ![](images/e79683049f44c6ff5d69d713866f4e3994c72d69e83e26a70447e89515d2104e.jpg)
636
+ (f) EUDTP
637
+
638
+ ![](images/acf7a994a940317428515c38849dfe1ea4c8622bf16268c51227abe736bd2126.jpg)
639
+ Figure 24: Intervention results under different conceptualization methods using various concept selection criteria. Here, we used JNT + P training strategy for the CUB.
640
+ (a) RAND
641
+
642
+ ![](images/10ad7803ec40f58146721bff7532ef56a3c64792b79822f577e8912db5ef5db7.jpg)
643
+ (b) UCP
644
+
645
+ ![](images/6fbe8b0405357043aab0dca457fdccc93fd39c6e86b679e0893f24ec20af0707.jpg)
646
+ (c) LCP
647
+
648
+ ![](images/1cfc370fd103bdeba49539dec14953c8fae06d93792e5eb37782cb2c22ceb8df.jpg)
649
+ (d) CCTP
650
+
651
+ ![](images/558f5c4a53161d95d62f08b0d20a5a82fe5e664e7adfc2d892cf674318e93fd9.jpg)
652
+ (e) ECTP
653
+
654
+ ![](images/e433da7abc541538ea0e750e89b1cf88db191a90a02bb2cbdc383a8fc156970b.jpg)
655
+ (f) EUDTP
656
+
657
+ ![](images/f9227c033380ed54ac57687814dfa793515dc71ec12fb7a12c5b178cf64a52d4.jpg)
658
+ Figure 25: Intervention results under different conceptualization methods using other concept selection criteria. Here, we used IND training strategy for the SkinCon.
659
+ (a) RAND
660
+
661
+ ![](images/c2200120fcdadd03e274a16738f9870c2fe86a05cf9ede9f1556411a62d08383.jpg)
662
+ (b) UCP
663
+
664
+ ![](images/c410a1c95d06da27ce8d97d9db573abaf3f21712105024f2c0fcdf601724e190.jpg)
665
+ (c) LCP
666
+
667
+ ![](images/83f1cc73a585b77b460456f6961b8eb9609745fb48701ae7770de18ce08aa20a.jpg)
668
+ (d) CCTP
669
+
670
+ ![](images/6353672af8cc5b85443ca5ec6486c79ab9c8dd778dcab4df402ba9dd15aa66a9.jpg)
671
+ (e) ECTP
672
+
673
+ ![](images/59f31c29dea0b5b2b7db3d76c883bbe0d349520c5e16f95df8a4c3ff42726d73.jpg)
674
+ (f) EUDTP
675
+
676
+ ![](images/caa7657b0acbf0148a630c0592489d91375ad8193fc508f51722f3c8664e8a41.jpg)
677
+ Figure 26: Intervention results under different conceptualization methods using various concept selection criteria. Here, we used the IND training strategy for the synthetic dataset.
678
+ (a) RAND
679
+ Figure 27: Intervention results under different conceptualization methods using various concept selection criteria. Here, we used the JNT + P training strategy for the synthetic dataset.
680
+
681
+ ![](images/cac16e3ad358b7bdae706b23711b4029f09ff67774ec6ec68d799d8ab4ad7417.jpg)
682
+ (b) UCP
683
+
684
+ ![](images/78d1a90a8ff4d2c0a94f8d5421be467677b67822b9dd65646cbe656b58133497.jpg)
685
+ (c) LCP
686
+
687
+ ![](images/b70509bc1f89848ed9219dd1b486b5a43bef4624104fe57fa7f2bbccfa6ac834.jpg)
688
+ (d) CCTP
689
+
690
+ ![](images/76f35e0147df16cf69ddfda050b05cf3a8a3e0e7141eb160a3fbc23e2b94629e.jpg)
691
+ (e) ECTP
692
+
693
+ ![](images/b0ef60fea46dc9b338d3dfd085db46a7a00173e8b8bb35fc1bf18ca5c57a5110.jpg)
694
+ (f) EUDTP
695
+
696
+ # G. More Results on the Effect of Data on Intervention
697
+
698
+ We find that intervention on data with extremely high input noise or extremely high concept diversity makes the developed concept selection criteria less effective in general, with a larger gap from LCP (see Figure 28). Specifically, UCP becomes less effective than the other criteria in these cases. We conjecture that concept prediction uncertainty becomes largely uncorrelated with concept prediction loss when the concept predictor $g$ achieves very low accuracy.
699
+
700
+ We also evaluate the effect of concept sparsity levels, i.e., the probability of each concept having value 0, using the CCTP criterion. Note that intervention becomes less effective as the sparsity level gets closer to $50\%$, as seen in Figure 29a. To understand why, recall that this criterion aggregates the contribution of each concept to the target label prediction. When the sparsity level is high and most concepts have value 0, the target prediction is determined by only a few concepts, and CCTP can work effectively by first intervening on the concept with the highest contribution. In contrast, as the level gets closer to $50\%$, the target prediction is determined by almost half of the concepts, and the contribution to the target prediction is no longer a
701
+
702
+ ![](images/0fbcd34d101770b3271820afea8a93033057080cabffdcf931856eba7f42b420.jpg)
703
+ (a) Data with extremely high input noise
704
+
705
+ ![](images/0e6850f4f137a7fb1d43a52515d75e2cd8d682323170f53251a227bc90c7b32d.jpg)
706
+ (b) Data with extremely high concept diversity
707
+
708
+ ![](images/a57cc036c6f13581175a75d2c6effde80f9e4fc8014065a361fcda0d8eed5b2e.jpg)
709
+ Figure 28: Intervention results on data with extremely high input noise (variance of 2.0) or extremely high concept diversity (perturbation probability of $30\%$), respectively. In these cases, the proposed concept selection criteria work less effectively.
710
+ (a) CCTP with different concept sparsity levels
711
+
712
+ ![](images/65344e5ae882b3ae74696c55146077707c5fc3953694ad409738c20878510048.jpg)
713
+ (b) UCP with different subgroup sizes
714
+ Figure 29: (a) CCTP becomes more effective with a higher concept sparsity level. (b) Final task error increases, but intervention becomes more effective with larger sub-group sizes.
715
+
716
+ discriminative feature of the concepts, thus decreasing the effectiveness of the criterion. Furthermore, we observe that the final task error increases but intervention becomes more effective with a large sub-group size $\gamma$ (see Figure 29b). Specifically, 12 interventions are needed to cut the task error in half for the data with $\gamma = 1$, but correcting 5 concepts achieves the same effect for $\gamma = 10$. This is because, when $\gamma$ is large, intervention can decrease the task error much faster on mis-classified examples by distinguishing them from similar classes.
717
+
718
+ # H. More Results on Fairness of Majority Voting
719
+
720
+ ![](images/19dd29bbc1ea688cf532a2c07371ce8482c532d52e58ca6014a94ee18d11ed5e.jpg)
721
+ (a) RAND
722
+ Figure 30: Comparison of test-time intervention results with and without using majority voting.
723
+
724
+ ![](images/6cd62907c76141925e70b069fa232068c7c8cd7d4b8cba0a3bad7cfa2df2adb9.jpg)
725
+ (b) UCP
726
+
727
+ ![](images/54b78cdd13f91b464c0ff3a4818659aa96ea41c9af1598dbc634d6b8e83a1b77.jpg)
728
+ (c) LCP
729
+
730
+ When we do not use majority voting on the CUB dataset, intervention instead increases the task error, as seen in Figure 30. Specifically, intervention does not decrease the task error at all with RAND or UCP. Even with the LCP criterion, intervention does not reduce the task error as much as when we use majority voting, and the error starts to increase after about 10 concepts are intervened on. See Appendix B for the training details.
acloserlookattheinterventionprocedureofconceptbottleneckmodels/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e5d1e28de37fcf405dec9f5549cd9a56185dcc946015f1cd587c2cf77546882
3
+ size 1377800
acloserlookattheinterventionprocedureofconceptbottleneckmodels/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0ea9436111b0600312b62628922c4fe3499e086af8c67b8c5ae492660e8e940
3
+ size 1014165
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b83944708c2780dfe1951ffdd863a19178d8cdcfad8b8b2b8238f06228fb870
3
+ size 498360
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5972f1d6e41e3aeb44023d4788047ffd26d54d9579d33f4e558cc2a592602e0
3
+ size 604218
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/44a1d0cf-c56a-42a0-8785-7b5f1d6c9d78_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33e7618a08120eff6bcd3d64d0902a25fa8111204976f28a054e10939c66f7f7
3
+ size 3379085
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/full.md ADDED
The diff for this file is too large to render. See raw diff
 
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37d5efb4407ba01a988890825993db4be13dc82fcd1145de0a3195c008ab92a9
3
+ size 2318395
acompleteexpressivenesshierarchyforsubgraphgnnsviasubgraphweisfeilerlehmantests/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ab35708819d08c83d075534747db5c79805a0492e59082c0dcefe70979b51ff
3
+ size 4169704
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b6ca090caaaff0a2b2a13b0029c3002f4f2893772ebf093c201095e5da2ca27
3
+ size 96066
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5eb4a30733c6e109cc636a38fbefe7ea5ae65ebf14150ccc86cbdd7e57ad8bee
3
+ size 116733
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/dad5fa8b-794f-4094-bd4b-2f7c40ff0fce_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94b68a14932085f594b26e73b0a8faced413ed0c1efa4a87039574224daad2cf
3
+ size 7316093
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/full.md ADDED
@@ -0,0 +1,415 @@
 
 
 
 
1
+ # A Conditional Normalizing Flow for Accelerated Multi-Coil MR Imaging
2
+
3
+ Jeffrey Wen<sup>1</sup> Rizwan Ahmad<sup>2</sup> Philip Schniter<sup>1</sup>
4
+
5
+ # Abstract
6
+
7
+ Accelerated magnetic resonance (MR) imaging attempts to reduce acquisition time by collecting data below the Nyquist rate. As an ill-posed inverse problem, many plausible solutions exist, yet the majority of deep learning approaches generate only a single solution. We instead focus on sampling from the posterior distribution, which provides more comprehensive information for downstream inference tasks. To do this, we design a novel conditional normalizing flow (CNF) that infers the signal component in the measurement operator's nullspace, which is later combined with measured data to form complete images. Using fastMRI brain and knee data, we demonstrate fast inference and accuracy that surpasses recent posterior sampling techniques for MRI. Code is available at https://github.com/jwen307/mri_cnf
8
+
9
+ # 1. Introduction
10
+
11
+ Magnetic resonance imaging (MRI) is a routine diagnostic imaging tool that has the potential to provide high-quality soft-tissue images without exposure to ionizing radiation. However, MRI exams are generally time-consuming, which reduces throughput, compromises patient comfort, and increases the likelihood of artifacts from patient motion. Scan time can be reduced by sampling below the Nyquist rate, but this makes the image reconstruction process more challenging. Hence, recovering high-accuracy images from highly subsampled MRI scans has become an active area of research (Knoll et al., 2020).
12
+
13
+ Many approaches have been proposed to recover MR images from subsampled measurements. Parallel imaging, which is available on all commercial scanners, takes advantage of
14
+
15
+ $^{1}$ Dept. of ECE, The Ohio State University, Columbus, OH 43210, USA. $^{2}$ Dept. of BME, The Ohio State University, Columbus, OH 43210, USA. Correspondence to: Jeffrey Wen <wen.254@osu.edu>.
16
+
17
+ Proceedings of the $40^{th}$ International Conference on Machine Learning, Honolulu, Hawaii, USA. PMLR 202, 2023. Copyright 2023 by the author(s).
18
+
19
+ the availability of multiple receiver coils. After estimating coil-sensitivity maps or interpolation kernels, methods like SENSE (Pruessmann et al., 1999) and GRAPPA (Griswold et al., 2002) can use subsampled data from multiple coils to remove aliasing artifacts in the final reconstruction. However, parallel imaging alone can typically allow only two- to three-fold acceleration of the acquisition process. For higher acceleration, methods based on compressed-sensing (CS) have been proposed (Lustig et al., 2007). The CS methods are framed as iteratively minimizing the sum of a data-fidelity term and a regularization term, where the regularization term incorporates prior knowledge about the images. The prior knowledge could be that the true images are sparse in some transform domain, as in traditional CS, or that the true images are preserved by some denoising function, as in "plug-and-play" recovery (Ahmad et al., 2020). Deep neural networks have also been proposed for MR image recovery, based on end-to-end approaches like (Zbontar et al., 2018; Eo et al., 2018; Sriram et al., 2020) or algorithmic unrolling (Hammernik et al., 2018). Yet another approach, known as compressed sensing with a generative model (CSGM) (Bora et al., 2017), trains a deep image generator and then optimizes its input to give the image that, after application of the forward model, best matches the measurements.
20
+
21
+ Although they achieve high reconstruction quality, the aforementioned methods provide only a point estimate. Yet, accelerated MRI is an ill-posed inverse problem, where there exist many possible reconstructions that are consistent with a given prior and set of subsampled measurements. Since small variations in image content can impact the final diagnosis, it is crucial for radiologists to know whether a visual structure is truly reflective of the patient anatomy or merely an imaging artifact. Problems of this form fall into the realm of uncertainty quantification (UQ) (Abdar et al., 2021).
22
+
23
+ One approach that facilitates UQ is Bayesian imaging, where the goal is not to compute a single "good" image estimate but rather to sample from the posterior distribution. The availability of a large batch of posterior samples enables many forms of UQ. For example, a simple approach is to generate the pixel-wise standard-deviation map, which quantifies which pixels are more trustworthy. A more involved approach is to construct a hypothesis test for the absence of a particular (multi-pixel) visual structure (Repetti et al., 2019).
24
+
25
+ In this paper, we focus on the task of sampling from the posterior, which facilitates future work that uses those samples for uncertainty quantification, adaptive sampling (Sanchez et al., 2020), counterfactual diagnosis (Chang et al., 2019), or other applications.
26
+
27
+ There exist several deep-learning based approaches to sample from the posterior, including those based on conditional generative adversarial networks (CGANs) (Isola et al., 2017; Adler & Oktem, 2018), conditional variational autoencoders (CVAEs) (Edupuganti et al., 2021; Tonolini et al., 2020), conditional normalizing flows (CNFs) (Ardizzone et al., 2019; Winkler et al., 2019), and score/Langevin/diffusion-based approaches (Kadkhodaie & Simoncelli, 2020; Laumont et al., 2022; Ho et al., 2020). In this paper, we focus on the CNF approach. Compared to the other methods, CNFs yield rapid inference and require only simple, likelihood-based training. In a recent super-resolution (SR) contest (Lugmayr et al., 2022), a CNF (by Song et al. (2022)) won, beating all CGAN, CVAE, and diffusion-based competitors.
28
+
29
+ Inspired by the success of CNFs in SR, we design the first CNF for accelerated multi-coil MRI. Previous applications of CNFs to MRI (Denker et al., 2021a) showed competitive results but were restricted to single-coil recovery of magnitude images. As the vast majority of modern MRI scanners capture multi-coil data, the extension to multi-coil, complex-valued data is crucial for real-world adoption. However, the order-of-magnitude increase in dimensionality makes this transition non-trivial. For this purpose, we propose a novel CNF that infers only the signal component in the nullspace of the measurement operator and combines its output with the measured data to generate complete images. Using fastMRI brain and knee data, we demonstrate that our approach outperforms existing posterior samplers based on CGANs (Adler & Oktem, 2018) and MRI-specific score/Langevin-based approaches (Jalal et al., 2021a; Chung & Ye, 2022a) in almost all accuracy metrics, while retaining fast inference and requiring minimal hyperparameter tuning.
30
+
31
+ # 2. Background
32
+
33
+ # 2.1. Measurement Model
34
+
35
+ In MRI, measurements of the $D$ -pixel true image $\pmb{i}_{\mathrm{true}} \in \mathbb{C}^{D}$ are collected in the spatial Fourier domain, known as the "k-space." In a multi-coil system with $C$ coils, measurements from the $c$ th coil can be written as
36
+
37
+ $$
38
+ \boldsymbol{k}_{c} = \boldsymbol{P}\boldsymbol{F}\boldsymbol{S}_{c}\, \boldsymbol{i}_{\mathrm{true}} + \boldsymbol{\epsilon}_{c} \in \mathbb{C}^{M}, \tag{1}
39
+ $$
40
+
41
+ where $\pmb{P} \in \mathbb{R}^{M \times D}$ is a sampling matrix containing $M$ rows of the $D \times D$ identity matrix $\pmb{I}$ , $\pmb{F}$ is the $D \times D$ 2D unitary discrete Fourier transform (DFT) matrix, $S_{c} \in \mathbb{C}^{D \times D}$ is the coil-sensitivity map of the $c$ th coil, and $\epsilon_{c} \in \mathbb{C}^{M}$ is measurement noise. We will assume that $\{\pmb{S}_{c}\}_{c=1}^{C}$ have
42
+
43
+ been obtained from ESPIRiT (Uecker et al., 2014), in which case $\sum_{c=1}^{C} S_{c}^{\mathsf{H}} S_{c} = I$ . In the case of single-coil MRI, $C = 1$ and $S_{1} = I$ .
44
+
45
+ We now rewrite the model in terms of the "coil images" $\boldsymbol{x}_c \triangleq \boldsymbol{S}_c \boldsymbol{i}_{\mathrm{true}}$ and their corresponding "zero-filled" estimates $\boldsymbol{y}_c \triangleq \boldsymbol{F}^{\mathsf{H}} \boldsymbol{P}^{\top} \boldsymbol{k}_c$, and then stack all the coils together via $\boldsymbol{x}_{\mathrm{true}} \triangleq [\boldsymbol{x}_1^\top, \dots, \boldsymbol{x}_C^\top]^\top$ and $\boldsymbol{y} \triangleq [\boldsymbol{y}_1^\top, \dots, \boldsymbol{y}_C^\top]^\top$ to obtain
46
+
47
+ $$
48
+ \boldsymbol{y} = \boldsymbol{A}\boldsymbol{x}_{\mathrm{true}} + \varepsilon, \tag{2}
49
+ $$
50
+
51
+ with $\varepsilon = [(\pmb{F}^{\mathsf{H}}\pmb{P}^{\top}\pmb{\epsilon}_{1})^{\top},\dots,(\pmb{F}^{\mathsf{H}}\pmb{P}^{\top}\pmb{\epsilon}_{C})^{\top}]^{\top}$ and forward operator
52
+
53
+ $$
54
+ \boldsymbol{A} = \operatorname{blkdiag}\big\{ \boldsymbol{F}^{\mathsf{H}}\boldsymbol{P}^{\top}\boldsymbol{P}\boldsymbol{F}, \dots, \boldsymbol{F}^{\mathsf{H}}\boldsymbol{P}^{\top}\boldsymbol{P}\boldsymbol{F} \big\}. \tag{3}
55
+ $$
56
+
57
+ To perform image recovery, one can first compute $\pmb{y}$ , then estimate $\widehat{\pmb{x}}$ from $\pmb{y}$ , and finally either "coil-combine" to yield a complex-valued image estimate
58
+
59
+ $$
60
+ \widehat {\boldsymbol {i}} = \left[ \boldsymbol {S} _ {1} ^ {\mathrm {H}}, \dots , \boldsymbol {S} _ {C} ^ {\mathrm {H}} \right] \widehat {\boldsymbol {x}} \tag {4}
61
+ $$
62
+
63
+ or perform root-sum-of-squares (RSS) reconstruction to obtain a magnitude-only image estimate
64
+
65
+ $$
66
+ \left| \widehat {\boldsymbol {i}} \right| = \sqrt {\sum_ {c = 1} ^ {C} \left| \widehat {\boldsymbol {x}} _ {c} \right| ^ {2}}. \tag {5}
67
+ $$
68
+
69
+ In the "fully sampled" case, $M = D$ and so $\mathbf{y} = \mathbf{x}_{\mathrm{true}} + \varepsilon$ . But fully sampled acquisition is very slow, and so we are interested in accelerated MRI, where one collects $M < D$ measurements per coil to save time. This gives an "acceleration factor" of $R \triangleq D / M$ , but it makes $A$ rank deficient. In this latter case, accurate recovery of $\mathbf{x}_{\mathrm{true}}$ requires the use of prior information about $\mathbf{x}_{\mathrm{true}}$ , such as the knowledge that $\mathbf{x}_{\mathrm{true}}$ is a vector of MRI coil images.
70
+
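+ To make this notation concrete, here is a minimal numpy sketch of (1)-(5) for a Cartesian sampling mask; the array shapes and function names are illustrative assumptions, not our actual pipeline.
+
+ ```python
+ # Sketch of the multi-coil model: i_true (H, W), S (C, H, W), mask (H, W).
+ import numpy as np
+
+ def forward(i_true, S, mask):
+     """k_c = P F S_c i_true per coil (noise omitted); the mask zeroes
+     non-measured k-space locations instead of dropping rows."""
+     x = S * i_true[None]               # coil images x_c = S_c i_true
+     k = np.fft.fft2(x, norm="ortho")   # unitary 2D DFT per coil
+     return mask[None] * k
+
+ def zero_filled(k_masked):
+     """y_c = F^H P^T k_c: inverse DFT of the zero-filled k-space."""
+     return np.fft.ifft2(k_masked, norm="ortho")
+
+ def coil_combine(x_hat, S):
+     """Eq. (4): i_hat = sum_c S_c^H x_c (using sum_c S_c^H S_c = I)."""
+     return np.sum(np.conj(S) * x_hat, axis=0)
+
+ def rss(x_hat):
+     """Eq. (5): root-sum-of-squares magnitude image."""
+     return np.sqrt(np.sum(np.abs(x_hat) ** 2, axis=0))
+ ```
+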
71
+ # 2.2. Posterior Sampling
72
+
73
+ In the case of MRI, the posterior distribution that we would ultimately like to sample from is $p_{i|k}(\cdot|\mathbf{k})$, where $\mathbf{k} \triangleq [\boldsymbol{k}_1^\top, \ldots, \boldsymbol{k}_C^\top]^\top$. Equivalently, we could consider $p_{i|y}(\cdot|\mathbf{y})$, since $\mathbf{y}$ and $\mathbf{k}$ contain the same information. Another option is to sample from $p_{x|y}(\cdot|\mathbf{y})$ and then use (4) or (5) to combine coil images into a single image. We take the latter approach.
74
+
75
+ For CNFs and CGANs, posterior sampling is accomplished by designing a neural network that maps samples from an easy-to-generate latent distribution (e.g., white Gaussian) to the target distribution (i.e., the distribution of $x$ given $y$ , with density $p_{x|y}$ ). Once that network is trained, sample generation is extremely fast. For Langevin dynamics, an algorithm is run for hundreds or thousands of iterations to generate each sample, and each iteration involves calling a neural network. Consequently, the inference time is much longer than that of CNFs and CGANs.
76
+
77
+ # 2.3. Conditional Normalizing Flows
78
+
79
+ Normalizing flows (NF) (Dinh et al., 2015; 2017; Kingma & Dhariwal, 2018; Papamakarios et al., 2021) have emerged as powerful generative models capable of modeling complex data distributions. Normalizing flows learn an invertible mapping between a target data distribution and a simple latent distribution, generally a Gaussian. More concretely, for a latent sample $z$ drawn from the latent distribution $p_{z}$ , the normalizing flow defines an invertible transformation $f_{\theta}(\cdot): \mathbb{R}^{Q} \to \mathbb{R}^{Q}$ . This transformation is parameterized by $\theta$ , and $x = f_{\theta}(z)$ defines a sample in the target data domain. This mapping of the latent distribution induces a probability in the target data domain with a probability density derived from the change-of-variable formula
80
+
81
+ $$
82
+ \widehat{p}_{x}(\boldsymbol{x}; \boldsymbol{\theta}) = p_{z}\big(f_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x})\big) \det\left(\frac{\partial f_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x})}{\partial \boldsymbol{x}}\right), \tag{6}
83
+ $$
84
+
85
+ where $\operatorname{det}(\cdot)$ denotes the determinant. The goal of the normalizing flow is to approximate the underlying data distribution $p_x$ with $\widehat{p}_x(\cdot;\theta)$ . Given a set of data samples $\{\pmb{x}^{(i)}\}_{i=1}^N$ , the parameters $\pmb{\theta}$ can be fit using a maximum likelihood loss
86
+
87
+ $$
88
+ \begin{aligned} L(\boldsymbol{\theta}) &= \sum_{i=1}^{N} \ln \widehat{p}_{x}\big(\boldsymbol{x}^{(i)}; \boldsymbol{\theta}\big) && (7) \\ &= \sum_{i=1}^{N} \ln p_{z}\big(f_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x}^{(i)})\big) + \ln \det\left(\frac{\partial f_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x}^{(i)})}{\partial \boldsymbol{x}^{(i)}}\right) && (8) \end{aligned}
89
+ $$
90
+
91
+ Once the training is complete, samples from the target distribution can be rapidly generated by drawing samples from the latent distribution and passing them through the normalizing flow $f_{\theta}$ .
92
+
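+ As a toy illustration of the objective (7)-(8), consider a single scalar affine flow $x = f(z) = e^{s} z + t$ with a standard-normal latent; this is a didactic sketch of the likelihood computation, not the flows used in this paper.
+
+ ```python
+ # Toy NF maximum-likelihood objective: x = exp(s) * z + t, z ~ N(0, 1).
+ import numpy as np
+
+ def nll(x, s, t):
+     z = (x - t) * np.exp(-s)                    # f^{-1}(x)
+     log_pz = -0.5 * (z**2 + np.log(2 * np.pi))  # ln p_z(f^{-1}(x))
+     log_det = -s                                # ln det(df^{-1}/dx) = -s
+     return -(log_pz + log_det).sum()            # negative of (8)
+
+ x = np.random.randn(1000) * 2.0 + 1.0           # data ~ N(1, 4)
+ # At s = ln 2, t = 1 the flow matches the data distribution, so the NLL
+ # is (near) minimal there; the untrained setting s = 0, t = 0 is worse.
+ print(nll(x, np.log(2.0), 1.0), nll(x, 0.0, 0.0))
+ ```
+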
93
+ It is worth noting that maximizing $L(\pmb{\theta})$ is equivalent to minimizing the Kullback-Leibler (KL) divergence between $\widehat{p}_x(\cdot; \pmb{\theta})$ and $p_x$ (Papamakarios et al., 2021), which aligns with the goal of approximating $p_x$ with $\widehat{p}_x(\cdot; \pmb{\theta})$ . The maximum-likelihood loss provides stable training with minimal hyperparameter tuning and has been shown to be robust to mode collapse.
94
+
95
+ Conditional normalizing flows (CNFs) (Ardizzone et al., 2021) generalize normalizing flows by adding a conditioning signal $\pmb{y}$ . With the CNF denoted as $h_{\pmb{\theta}}(\cdot, \cdot): \mathbb{R}^Q \times \mathbb{R}^Q \to \mathbb{R}^Q$ , the forward process from the latent domain to the data domain is given by $\pmb{x} = h_{\pmb{\theta}}(\pmb{z}, \pmb{y})$ . For complex-valued, multi-coil MRI, we have $Q = 2CD$ . The inclusion of $\pmb{y}$ alters the objective of the CNF to approximating the unknown posterior distribution $p_{x|y}(\cdot|\pmb{y})$ with $\widehat{p}_{x|y}(\cdot|\pmb{y}; \pmb{\theta})$ . As before, the change-of-variable formula implies the induced distribution
96
+
97
+ $$
98
+ \widehat {p} _ {x \mid y} (\boldsymbol {x} \mid \boldsymbol {y}; \boldsymbol {\theta}) = p _ {z} \left(h _ {\boldsymbol {\theta}} ^ {- 1} (\boldsymbol {x}, \boldsymbol {y})\right) \det \left(\frac {\partial h _ {\boldsymbol {\theta}} ^ {- 1} (\boldsymbol {x} , \boldsymbol {y})}{\partial \boldsymbol {x}}\right), \tag {9}
99
+ $$
100
+
101
+ where $h_{\theta}^{-1}$ refers to the inverse mapping of $h_\theta$ with respect to its first argument.
102
+
103
+ Given a dataset $\{(\pmb{x}^{(i)},\pmb{y}^{(i)})\}_{i = 1}^{N}$ , the maximum likelihood loss can be utilized to optimize the parameters $\pmb{\theta}$
104
+
105
+ $$
106
+ \begin{aligned} L(\boldsymbol{\theta}) &= \sum_{i=1}^{N} \ln \widehat{p}_{x|y}\big(\boldsymbol{x}^{(i)} \mid \boldsymbol{y}^{(i)}; \boldsymbol{\theta}\big) && (10) \\ &= \sum_{i=1}^{N} \ln p_{z}\big(h_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x}^{(i)}, \boldsymbol{y}^{(i)})\big) + \ln \det\left(\frac{\partial h_{\boldsymbol{\theta}}^{-1}(\boldsymbol{x}^{(i)}, \boldsymbol{y}^{(i)})}{\partial \boldsymbol{x}^{(i)}}\right) && (11) \end{aligned}
107
+ $$
108
+
109
+ CNFs have shown promising performance in solving inverse problems, such as super-resolution (Lugmayr et al., 2020; Kim & Son, 2021; Song et al., 2022), making them an exciting avenue of exploration for accelerated MRI. Denker et al. (2021a) developed a CNF for single-coil, magnitude-only knee images. This study showed promising initial results, but the limited scope did not demonstrate performance in the more realistic multi-coil, complex-valued domain. As this transition increases the dimensionality by an order of magnitude, non-trivial architectural changes are required. In this paper, we build on the latest advances in CNFs to create a method that is capable of generating high-quality posterior samples of multi-coil, complex-valued MRI images.
110
+
111
+ # 3. Method
112
+
113
+ Our CNF consists of two networks, a conditioning network $g_{\theta}$ and a conditional flow model $h_{\theta}$ . The conditioning network takes the vector of zero-filled (ZF) coil-images $\mathbf{y}$ as input and produces features that are used as conditioning information by the flow model $h_{\theta}$ . Aided by the conditioning information, $h_{\theta}$ learns an invertible mapping between samples in the latent space and those in the image space. Using the notation of Sec. 2.3, our overall CNF takes the form
114
+
115
+ $$
116
+ \bar {h} _ {\boldsymbol {\theta}} (\boldsymbol {z}, \boldsymbol {y}) \triangleq h _ {\boldsymbol {\theta}} (\boldsymbol {z}, g _ {\boldsymbol {\theta}} (\boldsymbol {y})). \tag {12}
117
+ $$
118
+
119
+ Recently, advancements of CNFs in the super-resolution literature have revealed useful insights for more general inverse problems. First, Lugmayr et al. (2020) suggested the use of a pretrained, state-of-the-art point-estimate network for the conditioning network $g_{\theta}$ . This network is then trained jointly with $h_{\theta}$ using the loss in (11). This approach provides a functional initialization of $g_{\theta}$ and allows $g_{\theta}$ to learn to provide features that are useful for the maximum-likelihood training objective. We utilize a UNet from (Zbontar et al., 2018) for $g_{\theta}$ since it has been shown to perform well in accelerated MRI. We first pre-train $g_{\theta}$ for MRI recovery, and later we jointly train $g_{\theta}$ and $h_{\theta}$ together.
120
+
121
+ Song et al. (2022) demonstrated the benefits of using "frequency-separation" when training a CNF for super-resolution.
122
+
123
+ ![](images/fab4df3487c3346fa1b4bf50d374b04e24b7713fbde1f5314b9ec0b3d0645283.jpg)
124
+ Figure 1. The architecture of our CNF. The conditioning network $g_{\theta}$ takes in multi-coil zero-filled image estimates $\mathbf{y}$ and outputs features used by the flow model $h_{\theta}$ . The flow learns an invertible mapping between Gaussian random samples $\mathbf{z}^{(i)}$ and images $\mathbf{u}^{(i)}$ that are the projections of the training images $\mathbf{x}^{(i)}$ onto the non-measured subspace.
125
+
126
+ The authors argue that the low-resolution conditional image already contains sufficient information about the low-frequency components of the image, so the CNF can focus on recovering only the high-frequency information. The CNF output is then added to an upsampled version of the conditional image to yield an estimate of the full image.
127
+
128
+ We now generalize the frequency-separation idea to arbitrary linear models of the form $\pmb{y} = \pmb{A}\pmb{x}_{\mathrm{true}} + \varepsilon$ from (2) and apply the resulting procedure to MRI. Notice that (2) implies
129
+
130
+ $$
131
+ \boldsymbol{A}^{+}\boldsymbol{y} = \boldsymbol{A}^{+}\boldsymbol{A}\boldsymbol{x}_{\mathrm{true}} + \boldsymbol{A}^{+}\varepsilon, \tag{13}
132
+ $$
133
+
134
+ where $(\cdot)^{+}$ denotes the pseudo-inverse. Here, $A^{+}Ax_{\mathrm{true}}$ is recognized as the projection of $x_{\mathrm{true}}$ onto the row-space of $A$ , which we will refer to as the "measured space." Then
135
+
136
+ $$
137
+ \boldsymbol{u}_{\mathrm{true}} \triangleq (\boldsymbol{I} - \boldsymbol{A}^{+}\boldsymbol{A})\, \boldsymbol{x}_{\mathrm{true}} \tag{14}
138
+ $$
139
+
140
+ would be the projection of $\pmb{x}_{\mathrm{true}}$ onto its orthogonal complement, which we refer to as the "nullspace." Assuming that the nullspace has dimension $> 0$ , we propose to construct an estimate $\widehat{\pmb{x}}$ of $\pmb{x}_{\mathrm{true}}$ with the form
141
+
142
+ $$
143
+ \widehat {\boldsymbol {x}} (\boldsymbol {z}, \boldsymbol {y}) = (\boldsymbol {I} - \boldsymbol {A} ^ {+} \boldsymbol {A}) \bar {h} _ {\boldsymbol {\theta}} (\boldsymbol {z}, \boldsymbol {y}) + \boldsymbol {A} ^ {+} \boldsymbol {y}, \tag {15}
144
+ $$
145
+
146
+ where $\overline{h}_{\pmb{\theta}}(\pmb {z},\pmb {y})$ is our CNF-generated estimate of $\pmb{u}_{\mathrm{true}}$ and the $(I - A^{+}A)$ in (15) strips off any part of $\overline{h}_{\pmb{\theta}}(\pmb {z},\pmb {y})$ that has leaked into the measured space. A similar approach was used in (Sønderby et al., 2017) for point estimation. Given training data $\{(x^{(i)},y^{(i)})\}_{i = 1}^{N}$ , the CNF $\overline{h}_{\pmb{\theta}}(\cdot ,\cdot)$ is trained to map code vectors $z^{(i)}\sim p_z$ to the nullspace projections
147
+
148
+ $$
149
+ \boldsymbol {u} ^ {(i)} \triangleq (\boldsymbol {I} - \boldsymbol {A} ^ {+} \boldsymbol {A}) \boldsymbol {x} ^ {(i)} \tag {16}
150
+ $$
151
+
152
+ using the measured data $\pmb{y}^{(i)}$ as the conditional information. As a result of (15), the reconstructions $\widehat{\pmb{x}}$ agree with the measurements $\pmb{y}$ in that $A\widehat{\pmb{x}} = \pmb{y}$ . However, this also means that $\widehat{\pmb{x}}$ inherits the noise $\varepsilon$ corrupting $\pmb{y}$ , and so this data-consistency procedure is best used in the low-noise regime. In the presence of significant noise, the dual-decomposition approach (Chen & Davies, 2020) may be more appropriate.
153
+
154
+ In the accelerated MRI formulation (1)-(3), the matrix $\mathbf{A}$ is itself an orthogonal projection matrix, so that, in (15),
155
+
156
+ $$
157
+ \boldsymbol{I} - \boldsymbol{A}^{+}\boldsymbol{A} = \operatorname{blkdiag}\big\{ \boldsymbol{F}^{\mathsf{H}}\widetilde{\boldsymbol{P}}^{\top}\widetilde{\boldsymbol{P}}\boldsymbol{F}, \dots, \boldsymbol{F}^{\mathsf{H}}\widetilde{\boldsymbol{P}}^{\top}\widetilde{\boldsymbol{P}}\boldsymbol{F} \big\}, \tag{17}
158
+ $$
159
+
160
+ where $\widetilde{\pmb{P}}\in \mathbb{R}^{(D - M)\times D}$ is the sampling matrix for the nonmeasured k-space. Also, $\pmb{y}$ is in the row-space of $\pmb{A}$ , so
161
+
162
+ $$
163
+ \boldsymbol {A} ^ {+} \boldsymbol {y} = \boldsymbol {y} \tag {18}
164
+ $$
165
+
166
+ in (15). Figure 1 illustrates the overall procedure, using "data consistency" to describe (15) and "nullspace projection" to describe (16). In Sec. 4.2, we quantitatively demonstrate the improvements gained from designing our CNF to estimate only the nullspace component.
167
+
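+ Because $\boldsymbol{A}$ is an orthogonal projection here, both the nullspace projection (16) and the data-consistency step (15) reduce to masking in k-space. A minimal sketch, reusing the illustrative conventions from Sec. 2.1:
+
+ ```python
+ # Sketch of (14)-(16) for the Cartesian mask; x, h_out, y have shape
+ # (C, H, W) and mask has shape (H, W). Names are illustrative.
+ import numpy as np
+
+ def measured_part(x, mask):
+     # A^+ A x: keep only the measured k-space lines of each coil image.
+     k = np.fft.fft2(x, norm="ortho")
+     return np.fft.ifft2(mask[None] * k, norm="ortho")
+
+ def nullspace_part(x, mask):
+     # (I - A^+ A) x: the training target u in (16).
+     return x - measured_part(x, mask)
+
+ def data_consistent(h_out, y, mask):
+     # Eq. (15): keep the nullspace component of the CNF output and add
+     # back the measured data (A^+ y = y here, per (18)).
+     return nullspace_part(h_out, mask) + y
+ ```
+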
168
+ # 3.1. Architecture
169
+
170
+ The backbone of $g_{\theta}$ is a UNet (Ronneberger et al., 2015) that mimics the design in (Zbontar et al., 2018), with 4 pooling layers and 128 output channels in the first convolution layer. The first layer was modified to accept complex-valued coil images. The inputs have $2C$ channels, where $C$ is the number of coils, each with a real and imaginary component.
171
+
172
+ The outputs of the final feature layer of the UNet are processed by a feature-extraction network with $L$ convolution layers. Together, the feature extraction network and the UNet make up our conditioning network $g_{\theta}$ . The output of each convolution layer is fed to conditional coupling blocks of the corresponding layer in $h_{\theta}$ .
173
+
174
+ For the flow model $h_\theta$ , we adopt the multi-scale RealNVP (Dinh et al., 2017) architecture. This construction utilizes $L$ -layers and $B$ -flow steps in each layer. A flow step consists of an activation normalization (Kingma & Dhariwal, 2018), a fixed $1 \times 1$ orthogonal convolution (Ardizzone et al., 2019), and a conditional coupling block (Ardizzone et al., 2021). Each layer begins with a checkerboard downsampling (squeeze layer) (Dinh et al., 2017) and a transition step made up of an activation normalization and $1 \times 1$ convolution. Layers end with a split operation that sends half of the channels directly to the output on the latent side. For all experiments, we use $L = 3$ and $B = 20$ . The full architecture of $h_\theta$ is specified in Fig. 1.
175
+
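+ For concreteness, the squeeze layer can be sketched as an invertible space-to-depth rearrangement that trades each $2 \times 2$ spatial block for $4\times$ the channels; this is a generic RealNVP-style squeeze written as an assumption, not the exact layer implementation.
+
+ ```python
+ # Invertible 2x2 space-to-depth squeeze and its inverse.
+ import numpy as np
+
+ def squeeze(x):                      # x: (C, H, W) -> (4C, H/2, W/2)
+     c, h, w = x.shape
+     x = x.reshape(c, h // 2, 2, w // 2, 2)
+     return x.transpose(0, 2, 4, 1, 3).reshape(c * 4, h // 2, w // 2)
+
+ def unsqueeze(x):                    # x: (4C, H/2, W/2) -> (C, H, W)
+     c, h, w = x.shape
+     x = x.reshape(c // 4, 2, 2, h, w)
+     return x.transpose(0, 3, 1, 4, 2).reshape(c // 4, h * 2, w * 2)
+
+ x = np.random.randn(16, 320, 320)
+ assert np.allclose(unsqueeze(squeeze(x)), x)  # invertible by construction
+ ```
+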
176
+ Although the code that accompanies (Denker et al., 2021a) gives a built-in mechanism to scale their flow architecture to accommodate an increased number of input and output channels, we find that this mechanism does not work well (see Sec. 4.2). Thus, in addition to incorporating nullspace learning, we redesign several aspects of the flow architecture and training. First, to prevent the number of flow parameters from growing unreasonably large, our flow uses fewer downsampling layers (3 vs 6) but more flow steps per down-sampling layer (20 vs 5), and we utilize one-sided (instead of two-sided) affine coupling layers. Second, to connect the conditioning network to the flow, Denker et al. (2021a) used a separate CNN for each flow layer and adjusted its depth to match the flow-layer dimension. We use a single, larger CNN and feed its intermediate features to the flow layers with matched dimensions, further preventing an explosion in the number of parameters. Third, our conditioning network uses a large, pretrained UNet, whereas Denker et al. (2021a) used a smaller untrained UNet. With our modifications, we grow the conditional network more than the flow network, which allows the CNF to better handle the high dimensionality of complex-valued, multi-coil data.
177
+
178
+ # 3.2. Data
179
+
180
+ We apply our network to two datasets: the fastMRI knee and fastMRI brain datasets (Zbontar et al., 2018). For the knee data, we use the non-fat-suppressed subset, giving 17286 training and 3592 validation images. We compress the measurements to $C = 8$ complex-valued virtual coils using (Zhang et al., 2013) and crop the images to $320 \times 320$ pixels. The sampling mask is generated using the golden ratio offset (GRO) (Joshi et al., 2022) Cartesian sampling scheme with an acceleration rate $R = 4$ and autocalibration signal (ACS)
181
+
182
+ region of 13 pixels. We create the ZF coil-image vectors $\pmb{y}$ by applying the mask and inverse Fourier transform to the fully sampled $\pmb{k}_c$ given by the fastMRI dataset to obtain $\pmb{y}_c = \pmb{F}^{\mathsf{H}}\pmb{P}^{\top}\pmb{P}\pmb{k}_c$ for all $c$ , and then stack the coils to obtain $\pmb{y} = [\pmb{y}_1^{\top},\dots,\pmb{y}_C^{\top}]^{\top}$ . We create the ground-truth coil-image vectors $\pmb{x}_{\mathrm{true}}$ using the same procedure but without the mask, i.e., $\pmb{x}_c = \pmb{F}^{\mathsf{H}}\pmb{k}_c$ and $\pmb{x}_{\mathrm{true}} = [\pmb{x}_1^{\top},\dots,\pmb{x}_C^{\top}]^{\top}$ .
183
+
184
+ With the brain data, we use the T2-weighted images and take the first 8 slices of all volumes with at least 8 coils. This provides 12224 training and 3352 validation images. The data is compressed to $C = 8$ virtual coils (Zhang et al., 2013) and cropped to $384 \times 384$ pixels. The GRO sampling scheme is again used with an acceleration rate $R = 4$ and a 32-wide ACS region. For both datasets, the coil-sensitivity maps are estimated from the ACS region using ESPIRiT (Uecker et al., 2014). All inputs to the network are normalized by the 95th percentile of the ZF magnitude images.
185
+
186
+ # 3.3. Training
187
+
188
+ For both datasets, we first train the UNet in $g_{\theta}$ with an additional $1 \times 1$ convolution layer to get the desired $2C$ channels. We train the UNet to minimize the mean-squared error (MSE) from the nullspace projected targets $\{\pmb{u}^{(i)}\}_{i=1}^{N}$ for 50 epochs with batch size 8 and learning rate 0.003. Then, we remove the final $1 \times 1$ convolution and jointly train $g_{\theta}$ and $h_{\theta}$ for 100 epochs to minimize the negative log-likelihood (NLL) loss of the nullspace projected targets. For the brain data, we use batch size 8 and learning rate 0.0003. For the knee data, we use batch size 16 with learning rate 0.0005. All experiments use the Adam optimizer (Kingma & Ba, 2015) with default parameters $\beta_1 = 0.9$ and $\beta_2 = 0.999$ . The full training takes about 4 days on 4 Nvidia V100 GPUs.
189
+
190
+ # 3.4. Comparison Methods
191
+
192
+ We compare against other methods that are capable of generating posterior samples for accelerated MRI. For the fastMRI brain data, we present results for the CGAN from (Adler & Oktem, 2018) and the Langevin method from (Jalal et al., 2021a). For the fastMRI knee data, we present results for the "Score" method from (Chung & Ye, 2022a) and the "sCNF" method from (Denker et al., 2021a).
193
+
194
+ For the CGAN, we utilize a UNet-based generator with 4 pooling layers and 128 output channels in the initial layer and a 5-layer CNN network for the discriminator. The generator takes $y$ concatenated with a latent vector $z$ as input. The model is trained with the default loss and hyperparameters from (Adler & Oktem, 2018) for 100 epochs with a learning rate of 0.001. For the Langevin method, we use the authors' implementation but with the GRO sampling mask described in Sec. 3.2.
195
+
196
+ The Score method differs from the other methods in that
197
+
198
+ ![](images/53574374058362d89f5b15b78f9479e23c27a40c61d0f362c0e3842e93b8c35f.jpg)
199
+ Figure 2. Mean images and pixel-wise standard-deviation maps computed from 8 and 32 posterior samples for the brain images and knee datasets, respectively. The standard-deviation maps show which pixels have the greatest reconstruction uncertainty. The corresponding PSNR is shown on each reconstruction.
200
+
201
+ it assumes that the k-space measurements $\pmb{k}_c$ are constructed from true coil images $\pmb{x}_{\mathrm{true}}$ with magnitudes affinely normalized to the interval [0, 1] and phases normalized to [0, 1] radians. Although this normalization cannot be enforced on prospectively undersampled MRI data, Score fails without it. So, to evaluate Score, we normalize each $\pmb{k}_c$ using knowledge of the ground-truth $\pmb{x}_c$, run Score, and un-normalize its output $\widehat{\pmb{x}}_c$ for comparison with the other methods. Since the Score paper (Chung & Ye, 2022a) used RSS combining to compute $\widehat{\pmb{i}}$, we do the same. For the Score method, we use $T = 200$ iterations rather than the default $T = 2000$ because, when using posterior-sample averaging (see Sec. 3.5), the PSNR computed using 200 iterations is better than with 2000.
202
+
203
+ The sCNF method works only on single-coil magnitude data, and so we convert our multi-coil data to that domain in order to evaluate sCNF. To do this, we apply RSS (5) to ZF coil-images $\pmb{y}$ and repeat the process for the true coil images $\pmb{x}_{\mathrm{true}}$ . Using those magnitude images, we train sCNF for 300 epochs with learning rate 0.0005 and batch size 32.
204
+
205
+ # 3.5. Evaluation
206
+
207
+ We report results for several different metrics, including peak-signal-to-noise ratio (PSNR), structural-similarity index (SSIM) (Wang et al., 2004), Fréchet Inception Distance (FID) (Heusel et al., 2017), and conditional FID (cFID)
208
+
209
+ (Soloveitchik et al., 2021). PSNR and SSIM were computed on the average of $P$ posterior samples $\{\widehat{\boldsymbol{i}}_p\}_{p = 1}^P$, i.e.,
210
+
211
+ $$
212
+ \widehat {\boldsymbol {i}} _ {(P)} \triangleq \frac {1}{P} \sum_ {p = 1} ^ {P} \widehat {\boldsymbol {i}} _ {p} \tag {19}
213
+ $$
214
+
215
+ to approximate the posterior mean, while FID and cFID were evaluated on individual posterior samples $\widehat{\pmb{i}}_p$ . By default, we compute all metrics using magnitude reconstructions $|\widehat{\pmb{i}}|$ rather than the complex-valued reconstructions $\widehat{\pmb{i}}$ , in part because competitors like sCNF generate only magnitude reconstructions, but also because this is typical in the MRI literature (e.g., the fastMRI competition (Zbontar et al., 2018)). So, for example, PSNR is computed as
216
+
217
+ $$
218
+ \mathrm{PSNR} \triangleq 10 \log_{10}\left(\frac{D \max_{d} \big|[\boldsymbol{i}_{\mathrm{true}}]_{d}\big|^{2}}{\big\|\, |\widehat{\boldsymbol{i}}_{(P)}| - |\boldsymbol{i}_{\mathrm{true}}| \,\big\|_{2}^{2}}\right), \tag{20}
219
+ $$
220
+
221
+ where $[\cdot ]_d$ extracts the $d$ th pixel. For FID and cFID, we use the embeddings of VGG-16 (Simonyan & Zisserman, 2014) as (Kastryulin et al., 2022) found that this helped the metrics better correlate with the rankings of radiologists.
222
+
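+ A short sketch of how (19) and (20) combine during evaluation (array names are illustrative):
+
+ ```python
+ # Average P posterior samples, then compute magnitude PSNR per (19)-(20).
+ import numpy as np
+
+ def psnr_db(i_true, samples):
+     i_avg = np.mean(samples, axis=0)      # eq. (19); samples: (P, H, W)
+     err = np.abs(i_avg) - np.abs(i_true)  # magnitude difference
+     peak = np.max(np.abs(i_true)) ** 2
+     return 10 * np.log10(i_true.size * peak / np.sum(err**2))  # eq. (20)
+ ```
+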
223
+ For the brain data, we compute all metrics on 72 random test images in order to limit the Langevin image generation time to 4 days. We generate complex-valued images using the coil-combining method in (4) and use $P = 32$ posterior samples to calculate cFID $^1$ , FID $^1$ , PSNR, and SSIM. (For the reference statistics of FID, we use the entire training dataset.) Because FID and cFID are biased by small sample sizes, we also compute FID $^2$ and cFID $^2$ with 2484 test samples and $P = 8$ for our method and the CGAN.
224
+
225
+ With the knee data, we follow a similar evaluation procedure except that, to comply with the evaluation steps of Score, we generate magnitude-only signals using the root-sum-of-squares (RSS) combining from (5). Also, we compute metrics on 72 randomly selected slices in order to bound the image generation time of Score to 6 days with $P = 8$. We use $P = 8$ for all metrics, but for $\mathrm{FID}^2$ and $\mathrm{cFID}^2$, we use 2188 test samples.
226
+
227
+ When computing inference time for all methods, we use a single Nvidia V100 with 32GB of memory and evaluate the time required to generate one posterior sample.
228
+
229
+ # 4. Results
230
+
231
+ Table 1 reports the quantitative metrics for the knee dataset. It shows that our method outperforms sCNF by a significant margin in all metrics except inference time. By using information from multiple coils and a more advanced architecture, our method shows the true competitive potential of CNFs in realistic accelerated MR imaging.
232
+
233
+ Table 1 also shows that our method surpasses Score in all metrics except $\mathrm{FID}^1$, even though Score benefited from impractical ground-truth normalization (see Sec. 3.4).
234
+
235
+ ![](images/a367693ae773cbc0a6419e62f70638494a8ab8687d94d4cb1b31bcb9f897cc62.jpg)
236
+ Figure 3. Examples of posterior samples and standard-deviation maps for the knee data. The samples show important structural variations. This demonstrates the advantages of generating multiple reconstructions and computing a pixel-wise standard-deviation map.
237
+
238
+ Compared to Score, our method generated posterior samples $8000 \times$ faster. Furthermore, our method (and sCNF) will see a speedup when multiple samples are generated, because the conditioning network $g_{\theta}$ needs to be evaluated only once per $P$ generated samples for a given $\mathbf{y}$. For example, with the knee data, we are able to generate $P = 32$ samples in 1.41 seconds, corresponding to 44 milliseconds per sample, which is a $2.5 \times$ speedup over the value reported in Table 1.
239
+
240
+ Table 2 reports the quantitative results for the brain dataset. The table shows that we outperform the Langevin and CGAN methods in all benchmarks except inference time. While our method is a bit slower than the CGAN, it is orders of magnitude faster than the Langevin approach.
241
+
242
+ We show the mean images and standard-deviation maps for the fastMRI knee and brain experiments in Fig. 2. For the knee data, our method captures texture more accurately than the sCNF method and provides a sharper representation than the Score method. All of the brain methods provide a visually accurate representation of the ground truth, but the Langevin method provides a more diffuse variance map, with energy spread throughout the image.
243
+
244
+ In Fig. 3, we plot multiple posterior samples, along with zoomed-in regions, to illustrate the changes across independently drawn samples for each method. The standard-deviation maps are generated using $P = 8$ posterior samples, three of which are shown. From the zoomed-in regions, it can be seen that several samples are consistent with the
245
+
246
+ <table><tr><td>Model</td><td>PSNR (dB)↑</td><td>SSIM↑</td><td>FID1↓</td><td>FID2↓</td><td>cFID1↓</td><td>cFID2↓</td><td>Time</td></tr><tr><td>Score</td><td>34.15 ± 0.19</td><td>0.8764 ± 0.0036</td><td>4.49</td><td>—</td><td>4.49</td><td>—</td><td>15 min</td></tr><tr><td>sCNF</td><td>32.93 ± 0.17</td><td>0.8494 ± 0.0047</td><td>7.32</td><td>5.78</td><td>8.49</td><td>6.51</td><td>66 ms</td></tr><tr><td>Ours</td><td>35.23 ± 0.22</td><td>0.8888 ± 0.0046</td><td>4.68</td><td>2.55</td><td>3.96</td><td>2.44</td><td>108 ms</td></tr></table>
247
+
248
+ Table 1. Average performance on non-fat-suppressed fastMRI knee data, with standard error reported after the ±. PSNR, SSIM, FID<sup>1</sup>, and cFID<sup>1</sup> are computed for 72 test images and $P = 8$ posterior samples. FID<sup>2</sup> and cFID<sup>2</sup> are computed for 2188 test samples and $P = 8$ posterior samples. Time refers to the generation of one posterior sample.
249
+
250
+ <table><tr><td>Model</td><td>PSNR (dB)↑</td><td>SSIM↑</td><td>FID1↓</td><td>FID2↓</td><td>cFID1↓</td><td>cFID2↓</td><td>Time</td></tr><tr><td>Langevin</td><td>37.88 ± 0.41</td><td>0.9042 ± 0.0062</td><td>6.12</td><td>—</td><td>5.29</td><td>—</td><td>14 min</td></tr><tr><td>CGAN</td><td>37.28 ± 0.19</td><td>0.9413 ± 0.0031</td><td>5.38</td><td>4.06</td><td>6.41</td><td>4.28</td><td>112 ms</td></tr><tr><td>Ours</td><td>38.85 ± 0.23</td><td>0.9495 ± 0.0012</td><td>4.13</td><td>2.37</td><td>4.15</td><td>2.44</td><td>177 ms</td></tr></table>
251
+
252
+ Table 2. Average performance on non-fat-suppressed fastMRI brain data, with standard error reported after the ±. PSNR, SSIM, FID<sup>1</sup>, and cFID<sup>1</sup> are computed for 72 test images and $P = 32$ posterior samples. FID<sup>2</sup> and cFID<sup>2</sup> are computed using 2484 test samples and $P = 8$. Time refers to the generation of one posterior sample.
253
+
254
+ ground truth while others are not (although they may be consistent with the measured data). Regions of high posterior variation can be flagged from visual inspection of the standard-deviation map and further investigated through viewing multiple posterior samples for improved clinical diagnoses.
255
+
256
+ Our method presents observable, realistic variations of small anatomical features in the zoomed-in regions. The variations are also registered in the standard-deviation map. Both the posterior samples and the standard-deviation map could be used by clinicians to assess their findings. Comparatively, our method demonstrates variation that is spread across the entire image, while in the Score method, the variation is mostly localized to small regions. Since it is difficult to say which standard-deviation map is more useful or correct, the interpretation of these maps could be an interesting direction for future work. The sCNF also demonstrates variation, but it is mostly driven by residual aliasing artifacts.
257
+
258
+ # 4.1. PSNR Gain versus $P$
259
+
260
+ It is well known that the minimum MSE (MMSE) estimate of $\pmb{i}$ from $\pmb{y}$ equals the conditional mean $\operatorname{E}\{\pmb{i}|\pmb{y}\}$ , i.e., the mean of the posterior distribution $p_{i|y}(\cdot|\pmb{y})$ . Thus, one way to approximate the MMSE estimate is to generate many samples from the posterior distribution and average them, as in (19). Bendel et al. (2022) showed that the MSE
261
+
262
+ $$
263
+ \mathcal{E}_{P} \triangleq \mathrm{E}\Big[ \big\| \widehat{\boldsymbol{i}}_{(P)} - \boldsymbol{i}_{\mathrm{true}} \big\|_{2}^{2} \,\Big|\, \boldsymbol{y} \Big] \tag{21}
264
+ $$
265
+
266
+ of the $P$ -posterior-sample average $\widehat{\pmb{i}}_{(P)}$ obeys $\mathcal{E}_1 / \mathcal{E}_P = 2P / (P + 1)$ . So, for example, the SNR increases by a factor of two as $P$ grows from 1 to $\infty$ . The same thing should happen for PSNR, as long as the PSNR definition is consistent with (21). For positive signals (i.e., magnitude images) the PSNR definition from (20) is consistent with
267
+
268
+ ![](images/8401e8986ff68ea6121598d2a03abc61eac46d68de6e0fc5d30b2a4358c834ba.jpg)
269
+
270
+ ![](images/cea1483d3379f41ce51a51a58d24d831013a3d1cc653188e08d019ba75fb80bb.jpg)
271
+
272
+ ![](images/c80d529ea0db3bf231fb57115de48674124048df40363bf54a837be30be8442e.jpg)
273
+ Figure 4. The gain in (magnitude) PSNR and complex PSNR of the $P$ -sample mean estimate $\hat{\pmb{i}}_{(P)}$ versus $P$ , for both brain and knee data. Note the $\approx 3$ dB increase as $P$ grows from 1 to infinity.
274
+
275
+ ![](images/611a272a8849dcc82a8f61da41025964b6b78f52d0bf84518a6b8b0534c7b9d8.jpg)
276
+
277
+ (21), but for complex signals we must use "complex PSNR"
278
+
279
+ $$
280
+ \mathrm{cPSNR} \triangleq 10 \log_{10}\left(\frac{D \max_{d} \big|[\boldsymbol{i}_{\mathrm{true}}]_{d}\big|^{2}}{\big\| \widehat{\boldsymbol{i}}_{(P)} - \boldsymbol{i}_{\mathrm{true}} \big\|_{2}^{2}}\right). \tag{22}
281
+ $$
282
+
283
+ As RSS combining provides only a magnitude estimate, we compute the coil-combined estimate for our method and Score to evaluate cPSNR behavior for the knee dataset.
284
+
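+ For reference, the theoretical gain $\mathcal{E}_1 / \mathcal{E}_P = 2P/(P+1)$ translates into the following dB values, approaching $10\log_{10} 2 \approx 3.01$ dB as $P \to \infty$:
+
+ ```python
+ import numpy as np
+
+ P = np.array([1, 2, 4, 8, 16, 32])
+ gain_db = 10 * np.log10(2 * P / (P + 1))
+ print(dict(zip(P.tolist(), gain_db.round(2).tolist())))
+ # {1: 0.0, 2: 1.25, 4: 2.04, 8: 2.5, 16: 2.75, 32: 2.88}
+ ```
+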
285
+ One may then wonder whether a given approximate posterior sampler has a PSNR gain versus $P$ that matches the theory. In Fig. 4, we answer this question by plotting the PSNR gain and the cPSNR gain versus $P \in \{1,2,4,8,16,32\}$ for the various methods under test (averaged over all 72 test samples). There we see that our method's cPSNR curve matches the theoretical curve well for both brain and knee data. As expected, our (magnitude) PSNR curve does not match the theoretical curve. The cPSNR curves of the Score and CGAN methods fall short of the theoretical curve by
286
+
287
+ <table><tr><td>Model</td><td>PSNR (dB)↑</td><td>SSIM↑</td><td>FID2↓</td><td>cFID2↓</td></tr><tr><td>(Denker et al., 2021a)</td><td>17.61 ± 0.20</td><td>0.6665 ± 0.0072</td><td>16.02</td><td>16.68</td></tr><tr><td>+ Data Consistency</td><td>27.27 ± 0.21</td><td>0.7447 ± 0.0061</td><td>16.92</td><td>18.56</td></tr><tr><td>+ Architectural Changes</td><td>33.87 ± 0.23</td><td>0.8715 ± 0.0049</td><td>4.48</td><td>4.50</td></tr><tr><td>+ Nullspace Learning</td><td>35.23 ± 0.22</td><td>0.8888 ± 0.0046</td><td>2.55</td><td>2.44</td></tr></table>
288
+
289
+ Table 3. Ablation Study: Performance on non-fat-suppressed fastMRI knee data, with standard error reported after the $\pm$. Each line adds a new contribution to the model of the previous line. Metrics are computed as described in Sec. 3.5.
290
+
291
+ ![](images/18652740898464339030523853ec4ec0431231738a802f34cc9383b7ccaf5b75.jpg)
292
+ Figure 5. Examples of a ground-truth image, one posterior sample, an average of $P = 8$ posterior samples, and a MAP estimate. The log posterior density in units of bits-per-dimension is shown in the bottom right corner of each image.
293
+
294
+ a large margin, but interestingly, the Langevin method's cPSNR curve matches ours almost perfectly. sCNF's PSNR gain curve matches the theoretical one almost perfectly, which provides further empirical evidence that CNF methods accurately sample from the posterior distribution.
295
+
296
+ # 4.2. Ablation Study
297
+
298
+ To evaluate the impact of our contributions to CNF architecture and training design, we perform an ablation study using the fastMRI knee dataset. We start with the baseline model in (Denker et al., 2021a), modified to take in 16 channels instead of 1, and scale it up using the built-in mechanism in the author's code. We train this model for 300 epochs with batch size 32 and learning rate 0.0001 to minimize the NLL of the multicoil targets $\{\pmb{x}^{(i)}\}$ , since higher learning rates were numerically unstable. Table 3 shows what happens when we add each of our contributions. First, we add data consistency (15) to the evaluation of the baseline. We then add the architectural changes described in Sec. 3.1, and finally we add nullspace learning to arrive at our proposed method. From Table 3, it can be seen that each of our design contributions yielded a significant boost in performance, and that nullspace learning was a critical ingredient in our outperforming the Score method in Table 1. For this ablation study, all models were trained following the procedure outlined in Sec. 3.3 (except for the learning rate of the baseline).
299
+
300
+ # 4.3. Maximum a Posteriori (MAP) Estimation
301
+
302
+ Because CNFs can evaluate the posterior density of a signal hypothesis (recall (9)), they can be used for maximum a posteriori (MAP) estimation, unlike CGANs.
303
+
304
+ Due to our data-consistency step (15), we find the MAP estimate of $\pmb{x}$ using
305
+
306
+ $$
307
+ \widehat{\boldsymbol{x}}_{\mathrm{MAP}} = \widehat{\boldsymbol{u}}_{\mathrm{MAP}} + \boldsymbol{A}^{+}\boldsymbol{y} \tag{23}
308
+ $$
309
+
310
+ $$
311
+ \widehat{\boldsymbol{u}}_{\mathrm{MAP}} = \arg\max_{\boldsymbol{u} \in \operatorname{null}(\boldsymbol{A})} \ln \widehat{p}_{u|y}(\boldsymbol{u} \mid \boldsymbol{y}). \tag{24}
312
+ $$
313
+
314
+ Note the CNF output $\pmb{u}$ is constrained to the nullspace of $\pmb{A}$ . From (17), this nullspace is spanned by the columns of
315
+
316
+ $$
317
+ \boldsymbol{W} \triangleq \operatorname{blkdiag}\big\{ \boldsymbol{F}^{\mathsf{H}}\widetilde{\boldsymbol{P}}^{\top}, \dots, \boldsymbol{F}^{\mathsf{H}}\widetilde{\boldsymbol{P}}^{\top} \big\}, \tag{25}
318
+ $$
319
+
320
+ which are orthonormal, and so $\widehat{\pmb{u}}_{\mathrm{MAP}} = \pmb {W}\widetilde{\pmb{k}}_{\mathrm{MAP}}$ with
321
+
322
+ $$
323
+ \begin{aligned} \widetilde{\boldsymbol{k}}_{\mathrm{MAP}} &= \arg\max_{\widetilde{\boldsymbol{k}}} \ln \widehat{p}_{u|y}\big(\boldsymbol{W}\widetilde{\boldsymbol{k}} \,\big|\, \boldsymbol{y}; \boldsymbol{\theta}\big) && (26) \\ &= \arg\max_{\widetilde{\boldsymbol{k}}} \left[ \ln p_{z}\big(\bar{h}_{\boldsymbol{\theta}}^{-1}(\boldsymbol{W}\widetilde{\boldsymbol{k}}, \boldsymbol{y})\big) + \ln \det\left(\frac{\partial \bar{h}_{\boldsymbol{\theta}}^{-1}(\tilde{\boldsymbol{u}}, \boldsymbol{y})}{\partial \tilde{\boldsymbol{u}}} \bigg|_{\tilde{\boldsymbol{u}} = \boldsymbol{W}\widetilde{\boldsymbol{k}}}\right) \right] && (27) \end{aligned}
324
+ $$
325
+
326
+ For this maximization, we use the Adam optimizer with 5000 iterations and a learning rate of $1 \times 10^{-8}$ . Above, $\widetilde{k}$ can be recognized as the unmeasured k-space samples.
327
+
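+ A hedged sketch of this MAP search is given below; `cnf_log_prob` and `W_apply` are stand-ins for evaluating $\ln \widehat{p}_{u|y}$ via (9) and for multiplication by $\boldsymbol{W}$, not functions from our released code.
+
+ ```python
+ # Sketch of the optimization (26)-(27) over the unmeasured k-space samples.
+ import torch
+
+ def map_estimate(cnf_log_prob, W_apply, k_tilde0, y, iters=5000, lr=1e-8):
+     # k_tilde0: initial unmeasured k-space samples (e.g., zeros, with a
+     # real-valued parameterization of the real/imaginary parts).
+     k_tilde = k_tilde0.detach().clone().requires_grad_(True)
+     opt = torch.optim.Adam([k_tilde], lr=lr)
+     for _ in range(iters):
+         opt.zero_grad()
+         loss = -cnf_log_prob(W_apply(k_tilde), y)  # negative log posterior
+         loss.backward()
+         opt.step()
+     u_map = W_apply(k_tilde.detach())
+     return u_map + y                               # eq. (23), since A^+ y = y
+ ```
+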
328
+ In Figure 5, we show an example of a MAP estimate along with the ground truth image, one sample from the posterior, a $P = 8$ posterior-sample average, and their corresponding log-posterior-density values. As expected, the MAP estimate has a higher log-posterior-density than the other estimates. Visually, the MAP estimate is slightly sharper than the sample average but contains less texture details than the single posterior sample.
329
+
330
+ # 5. Conclusion
331
+
332
+ In this work, we present the first conditional normalizing flow for posterior sample generation in multi-coil accelerated MRI. To do this, we designed a novel conditional normalizing flow (CNF) that infers the signal component in the measurement operator's nullspace, whose outputs are later combined with information from the measured space. In experiments with fastMRI brain and knee data, we demonstrate improvements over existing posterior samplers for MRI. Compared to score/Langevin-based approaches, our inference time is four orders-of-magnitude faster. We also illustrate how the posterior samples can be used to quantify uncertainty in MR imaging. This provides radiologists with additional tools to enhance the robustness of clinical diagnoses. We hope this work motivates additional exploration of posterior sampling for accelerated MRI.
+
+ # Acknowledgements
+
+ This work was supported in part by the National Institutes of Health under Grant R01-EB029957.
+
+ # References
+
+ Abdar, M., Pourpanah, F., Hussain, S., Rezazadegan, D., Liu, L., Ghavamzadeh, M., Fieguth, P., Cao, X., Khosravi, A., Acharya, U. R., et al. A review of uncertainty quantification in deep learning: Techniques, applications and challenges. Information Fusion, 76:243-297, 2021.
+ Adler, J. and Oktem, O. Deep Bayesian inversion. arXiv:1811.05910, 2018.
+ Ahmad, R., Bouman, C. A., Buzzard, G. T., Chan, S., Liu, S., Reehorst, E. T., and Schniter, P. Plug and play methods for magnetic resonance imaging. IEEE Signal Process. Mag., 37(1):105-116, March 2020.
+ Ardizzone, L., Bungert, T., Draxler, F., Köthe, U., Kruse, J., Schmier, R., and Sorrenson, P. Framework for Easily Invertible Architectures (FrEIA). https://github.com/vislearn/FrEIA, 2018. Accessed: 2022-11-05.
+ Ardizzone, L., Lüth, C., Kruse, J., Rother, C., and Köthe, U. Guided image generation with conditional invertible neural networks. arXiv:1907.02392, 2019.
+ Ardizzone, L., Kruse, J., Lüth, C., Bracher, N., Rother, C., and Köthe, U. Conditional invertible neural networks for diverse image-to-image translation. arXiv:2105.02104, 2021.
+ Bendel, M., Ahmad, R., and Schniter, P. A regularized conditional GAN for posterior sampling in inverse problems. arXiv:2210.13389, 2022.
+ Bora, A., Jalal, A., Price, E., and Dimakis, A. G. Compressed sensing using generative models. In Proc. Int. Conf. Mach. Learn., pp. 537-546, 2017.
+ Chang, C.-H., Creager, E., Goldenberg, A., and Duvenaud, D. Explaining image classifiers by counterfactual generation. In Proc. Int. Conf. on Learn. Rep., 2019.
+ Chen, D. and Davies, M. E. Deep decomposition learning for inverse imaging problems. In Proc. European Conf. Comp. Vision, pp. 510-526, 2020.
+ Chung, H. and Ye, J. C. Score-based diffusion models for accelerated MRI. Med. Image Analysis, 80:102479, 2022a.
+ Chung, H. and Ye, J. C. score-MRI. https://github.com/HJ-harry/score-MRI, 2022b. Accessed: 2022-12-15.
+ Denker, A., Schmidt, M., Leuschner, J., and Maass, P. Conditional invertible neural networks for medical imaging. J. Imaging, 7(11):243, 2021a.
+ Denker, A., Schmidt, M., Leuschner, J., and Maass, P. cinn-for-imaging. https://github.com/jleuschn/cinn_for_imaging, 2021b. Accessed: 2022-10-08.
+ Dinh, L., Krueger, D., and Bengio, Y. NICE: Non-linear independent components estimation. In Proc. Int. Conf. on Learn. Rep. Workshops, 2015.
+ Dinh, L., Sohl-Dickstein, J., and Bengio, S. Density estimation using Real NVP. In Proc. Int. Conf. on Learn. Rep., 2017.
+ Edupuganti, V., Mardani, M., Vasanawala, S., and Pauly, J. Uncertainty quantification in deep MRI reconstruction. IEEE Trans. Med. Imag., 40(1):239-250, January 2021.
+ Eo, T., Jun, Y., Kim, T., Jang, J., Lee, H.-J., and Hwang, D. KIKI-net: Cross-domain convolutional neural networks for reconstructing undersampled magnetic resonance images. Magn. Reson. Med., 80(5):2188-2201, 2018.
+ Falcon, W. et al. PyTorch Lightning, 2019. URL https://github.com/PyTorchLightning/pytorch-lightning.
+ Griswold, M. A., Jakob, P. M., Heidemann, R. M., Nittka, M., Jellus, V., Wang, J., Kiefer, B., and Haase, A. Generalized autocalibrating partially parallel acquisitions (GRAPPA). Magn. Reson. Med., 47(6):1202-1210, 2002.
+ Hammernik, K., Klatzer, T., Kobler, E., Recht, M. P., Sodickson, D. K., Pock, T., and Knoll, F. Learning a variational network for reconstruction of accelerated MRI data. Magn. Reson. Med., 79(6):3055-3071, 2018.
+ Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., and Hochreiter, S. GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In Proc. Neural Inf. Process. Syst. Conf., volume 30, 2017.
+ Ho, J., Jain, A., and Abbeel, P. Denoising diffusion probabilistic models. In Proc. Neural Inf. Process. Syst. Conf., volume 33, pp. 6840-6851, 2020.
+ Isola, P., Zhu, J.-Y., Zhou, T., and Efros, A. A. Image-to-image translation with conditional adversarial networks. In Proc. IEEE Conf. Comp. Vision Pattern Recog., pp. 1125-1134, 2017.
+ Jalal, A., Arvinte, M., Daras, G., Price, E., Dimakis, A., and Tamir, J. Robust compressed sensing MRI with deep generative priors. In Proc. Neural Inf. Process. Syst. Conf., 2021a.
+ Jalal, A., Arvinte, M., Daras, G., Price, E., Dimakis, A., and Tamir, J. csgm-mri-langevin. https://github.com/utcsilab/csgm-mri-langevin, 2021b. Accessed: 2021-12-05.
+ Joshi, M., Pruitt, A., Chen, C., Liu, Y., and Ahmad, R. Technical report (v1.0)-pseudo-random Cartesian sampling for dynamic MRI. arXiv:2206.03630, 2022.
+ Kadkhodaie, Z. and Simoncelli, E. P. Solving linear inverse problems using the prior implicit in a denoiser. arXiv:2007.13640, 2020.
+ Kastryulin, S., Zakirov, J., Pezzotti, N., and Dylov, D. V. Image quality assessment for magnetic resonance imaging. arXiv:2203.07809, 2022.
+ Kim, Y. and Son, D. Noise conditional flow model for learning the super-resolution space. arXiv:1606.02838, 2021.
+ Kingma, D. and Dhariwal, P. Glow: Generative flow with invertible 1x1 convolutions. In Proc. Neural Inf. Process. Syst. Conf., pp. 10236-10245, 2018.
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. In Proc. Int. Conf. on Learn. Rep., 2015.
+ Knoll, F., Hammernik, K., Zhang, C., Moeller, S., Pock, T., Sodickson, D. K., and Akcakaya, M. Deep-learning methods for parallel magnetic resonance imaging reconstruction: A survey of the current approaches, trends, and issues. IEEE Signal Process. Mag., 37(1):128-140, January 2020.
+ Laumont, R., Bortoli, V. D., Almansa, A., Delon, J., Durmus, A., and Pereyra, M. Bayesian imaging using plug & play priors: When Langevin meets Tweedie. SIAM J. Imag. Sci., 15(2):701-737, 2022.
+ Lugmayr, A., Danelljan, M., Van Gool, L., and Timofte, R. SRFlow: Learning the super-resolution space with normalizing flow. In Proc. European Conf. Comp. Vision, 2020.
+ Lugmayr, A., Danelljan, M., Timofte, R., Kim, K.-w., Kim, Y., Lee, J.-y., Li, Z., Pan, J., Shim, D., Song, K.-U., et al. NTIRE 2022 challenge on learning the super-resolution space. In Proc. IEEE Conf. Comp. Vision Pattern Recog., pp. 786-797, 2022.
+ Lustig, M., Donoho, D., and Pauly, J. M. Sparse MRI: The application of compressed sensing for rapid MR imaging. Magn. Reson. Med., 58(6):1182-1195, 2007.
+ Ong, F. and Lustig, M. SigPy: A Python package for high performance iterative reconstruction. In Proc. Annu. Meeting ISMRM, volume 4819, 2019.
+ Papamakarios, G., Nalisnick, E. T., Rezende, D. J., Mohamed, S., and Lakshminarayanan, B. Normalizing flows for probabilistic modeling and inference. J. Mach. Learn. Res., 22(57):1-64, 2021.
+ Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Kopf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., and Chintala, S. PyTorch: An imperative style, high-performance deep learning library. In Proc. Neural Inf. Process. Syst. Conf., pp. 8024-8035, 2019.
+ Pruessmann, K. P., Weiger, M., Scheidegger, M. B., and Boesiger, P. SENSE: Sensitivity encoding for fast MRI. Magn. Reson. Med., 42(5):952-962, 1999.
+ Repetti, A., Pereyra, M., and Wiaux, Y. Scalable Bayesian uncertainty quantification in imaging inverse problems via convex optimization. SIAM J. Imag. Sci., 12(1):87-118, 2019.
+ Ronneberger, O., Fischer, P., and Brox, T. U-Net: Convolutional networks for biomedical image segmentation. In Proc. Intl. Conf. Med. Image Comput. Comput. Assist. Intervent., pp. 234-241, 2015.
+ Sanchez, T., Krawczuk, I., Sun, Z., and Cevher, V. Uncertainty-driven adaptive sampling via GANs. In Proc. Neural Inf. Process. Syst. Workshop, 2020.
+ Simonyan, K. and Zisserman, A. Very deep convolutional networks for large-scale image recognition. arXiv:1409.1556, 2014.
+ Soloveitchik, M., Diskin, T., Morin, E., and Wiesel, A. Conditional Frechet inception distance. arXiv:2103.11521, 2021.
+ Sonderby, C. K., Caballero, J., Theis, L., Shi, W., and Huszár, F. Amortised MAP inference for image super-resolution. In Proc. Int. Conf. on Learn. Rep., 2017.
+ Song, K.-U., Shim, D., Kim, K.-w., Lee, J.-y., and Kim, Y. FS-NCSR: Increasing diversity of the super-resolution space via frequency separation and noise-conditioned normalizing flow. In Proc. IEEE Conf. Comp. Vision Pattern Recog. Workshop, pp. 968-977, June 2022.
+ Sriram, A., Zbontar, J., Murrell, T., Defazio, A., Zitnick, C. L., Yakubova, N., Knoll, F., and Johnson, P. End-to-end variational networks for accelerated MRI reconstruction. In Proc. Intl. Conf. Med. Image Comput. Comput. Assist. Intervent., pp. 64-73, 2020.
+ Tonolini, F., Radford, J., Turpin, A., Faccio, D., and Murray-Smith, R. Variational inference for computational imaging inverse problems. J. Mach. Learn. Res., 21(179):1-46, 2020.
+ Uecker, M., Lai, P., Murphy, M. J., Virtue, P., Elad, M., Pauly, J. M., Vasanawala, S. S., and Lustig, M. ESPIRiT-an eigenvalue approach to autocalibrating parallel MRI: Where SENSE meets GRAPPA. Magn. Reson. Med., 71(3):990-1001, 2014.
+ Wang, Z., Bovik, A. C., Sheikh, H. R., and Simoncelli, E. P. Image quality assessment: From error visibility to structural similarity. IEEE Trans. Image Process., 13(4):600-612, Apr. 2004.
+ Winkler, C., Worrall, D., Hoogeboom, E., and Welling, M. Learning likelihoods with conditional normalizing flows. arXiv:1912.00042, 2019.
+ Zbontar, J., Knoll, F., Sriram, A., Muckley, M. J., Bruno, M., Defazio, A., Parente, M., Geras, K. J., Katsnelson, J., Chandarana, H., Zhang, Z., Drozdzal, M., Romero, A., Rabbat, M., Vincent, P., Pinkerton, J., Wang, D., Yakubova, N., Owens, E., Zitnick, C. L., Recht, M. P., Sodickson, D. K., and Lui, Y. W. fastMRI: An open dataset and benchmarks for accelerated MRI. arXiv:1811.08839, 2018.
+ Zhang, T., Pauly, J. M., Vasanawala, S. S., and Lustig, M. Coil compression for accelerated imaging with Cartesian sampling. Magn. Reson. Med., 69(2):571-582, 2013.
+
+
+ # A. Accelerated MRI Simulation Procedure
+
+ We outline the procedure for simulating the accelerated MRI problem. The fastMRI datasets provide the fully sampled multi-coil k-space, i.e., $\{\pmb{k}_c\}_{c=1}^C$ with $M = D$. To obtain the ground-truth coil images $\{\pmb{x}_c\}_{c=1}^C$, we take the inverse Fourier transform of the fully sampled k-space measurement, i.e., $\pmb{x}_c = \pmb{F}^\mathsf{H}\pmb{k}_c$, wherein we assume that the noise $\epsilon_c$ in (1) is negligible. To obtain the zero-filled images $\pmb{y}_c$, we take the inverse Fourier transform after masking the fully sampled k-space measurement $\pmb{k}_c$, i.e., $\pmb{y}_c = \pmb{F}^\mathsf{H}\pmb{P}^\top \pmb{P}\pmb{k}_c$. This procedure is illustrated in Fig. 6 and sketched in code after the figure. In real-world accelerated MRI, the data-acquisition process would collect the masked k-space $\pmb{P}\pmb{k}_c$ directly.
+
+ ![](images/eb3969344556faa806a9eb1c54f964671a5e1095589df13166f1c1682a155671.jpg)
+ Figure 6. A visual illustration of simulating accelerated MRI. Given the fully sampled k-space $\pmb{k}_c$ highlighted in blue, we obtain the ground truth $\pmb{x}_c$ by applying the inverse Fourier transform $\pmb{F}^{\mathsf{H}}$. The zero-filled image $\pmb{y}_c$ is acquired by applying the sampling mask $\pmb{P}^{\top}\pmb{P}$ to the fully sampled $\pmb{k}_c$ and then taking the inverse Fourier transform $\pmb{F}^{\mathsf{H}}$.
+
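+ As a concrete reference, the following NumPy sketch implements the procedure above; the array shapes (coils, height, width) and the function name are illustrative assumptions.
+
+ ```python
+ import numpy as np
+
+ def simulate_accelerated_mri(k, mask):
+     # k: fully sampled multi-coil k-space, complex array of shape (C, H, W).
+     # mask: boolean sampling pattern of shape (H, W), i.e., the action of P^T P.
+     # Ground-truth coil images x_c = F^H k_c (noise assumed negligible).
+     x = np.fft.ifft2(k, axes=(-2, -1), norm="ortho")
+     # Zero-filled coil images y_c = F^H P^T P k_c: zero out the unmeasured
+     # k-space locations, then take the inverse Fourier transform.
+     y = np.fft.ifft2(k * mask, axes=(-2, -1), norm="ortho")
+     return x, y
+ ```
+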
+ # B. Implementation Details
+
+ For our machine learning framework, we use PyTorch (Paszke et al., 2019) and PyTorch Lightning (Falcon et al., 2019). To implement the components of the CNF, we use the Framework for Easily Invertible Architectures (FrEIA) (Ardizzone et al., 2018). For the Score, sCNF, and Langevin methods, we utilize the authors' implementations at (Chung & Ye, 2022b), (Denker et al., 2021b), and (Jalal et al., 2021b), respectively. ESPIRiT coil estimation and coil combining are implemented using the SigPy package (Ong & Lustig, 2019).
+
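+ For readers unfamiliar with FrEIA, the snippet below shows the general pattern for building a conditional invertible network with its `SequenceINN` interface; the toy dimensions and the fully connected subnetwork are placeholders, not our actual architecture.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import FrEIA.framework as Ff
+ import FrEIA.modules as Fm
+
+ def subnet(c_in, c_out):
+     # Small fully connected subnetwork used inside each coupling block.
+     return nn.Sequential(nn.Linear(c_in, 128), nn.ReLU(),
+                          nn.Linear(128, c_out))
+
+ # Toy sizes: an 8-dimensional signal conditioned on a 4-dimensional code.
+ inn = Ff.SequenceINN(8)
+ for _ in range(4):
+     inn.append(Fm.AllInOneBlock, cond=0, cond_shape=(4,),
+                subnet_constructor=subnet)
+
+ u = torch.randn(2, 8)               # batch of signals
+ c = torch.randn(2, 4)               # batch of conditioning codes
+ z, log_jac_det = inn(u, c=[c])      # forward: latents and log|det J|
+ u_rec, _ = inn(z, c=[c], rev=True)  # inverse pass recovers the input
+ ```
+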
+ # C. Brain Posterior Samples
+
+ ![](images/9225fcbf7f9d238fa5895ab73ab7d12449e52a85c714ff886b864829cc30db50.jpg)
+ Figure 7. Examples of posterior samples and standard-deviation maps for the brain images, both with zoomed regions.
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2946d6bdd5ffa22a5424e269caf080886e830f55091cfdedc0d816d8754da3a
+ size 843827
aconditionalnormalizingflowforacceleratedmulticoilmrimaging/layout.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f77b31825277d977c52ec1187508212dbfa9df90488193beff27b43430e5ecaf
+ size 576311
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_content_list.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:04e3135abb95610b4664e21b760cf26645fff3f79eacbf4ce234c22c0b833703
+ size 178072
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_model.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f29d0a4434013095ed9bb20dae3de3f240999baa168f0d4078f48edf5925e303
+ size 213172
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/ee44bd65-4ebd-4d5a-9321-fa647ed48b24_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd7548c08c7ff1c0c4803613d90eadce4a2805988a236b08ca08d855b59916b5
+ size 727361
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/full.md ADDED
The diff for this file is too large to render. See raw diff
 
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b35d2cea99ba842541e0af7674a5d8c27c3dc52827cc0fc7d89c8607191ba3d5
+ size 1034461
aconnectionbetweenonesteprlandcriticregularizationinreinforcementlearning/layout.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f5952de6adeaf0430e1321959a3799c78e877d6504ac3dd895473e46898183
+ size 899529
acoupledflowapproachtoimitationlearning/865a27f8-93e6-494a-8b17-c293ff834e3b_content_list.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43d664f62fc3d118f521c5a1b574581e7f8041677431849a41dec2488c48c557
+ size 101427
acoupledflowapproachtoimitationlearning/865a27f8-93e6-494a-8b17-c293ff834e3b_model.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f5e121c42ecf7f83fb34a8fd109062780a20fcb811b19f2c0acbc6f519cf2ae
+ size 125974