Add Batch 404cd5f0-5c28-4e42-b145-3844988a14b4
This view is limited to 50 files because it contains too many changes. See raw diff.
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_content_list.json +3 -0
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_model.json +3 -0
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_origin.pdf +3 -0
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/full.md +400 -0
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/images.zip +3 -0
- acloserlookattheadversarialrobustnessofdeepequilibriummodels/layout.json +3 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_content_list.json +3 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_model.json +3 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_origin.pdf +3 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/full.md +310 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/images.zip +3 -0
- acombinatorialperspectiveontheoptimizationofshallowrelunetworks/layout.json +3 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_content_list.json +3 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_model.json +3 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_origin.pdf +3 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/full.md +462 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/images.zip +3 -0
- acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/layout.json +3 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_content_list.json +3 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_model.json +3 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_origin.pdf +3 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/full.md +342 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/images.zip +3 -0
- acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/layout.json +3 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_content_list.json +3 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_model.json +3 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_origin.pdf +3 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/full.md +364 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/images.zip +3 -0
- acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/layout.json +3 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_content_list.json +3 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_model.json +3 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_origin.pdf +3 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/full.md +375 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/images.zip +3 -0
- aconditionalrandomizationtestforsparselogisticregressioninhighdimension/layout.json +3 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_content_list.json +3 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_model.json +3 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_origin.pdf +3 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/full.md +348 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/images.zip +3 -0
- aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/layout.json +3 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_content_list.json +3 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_model.json +3 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_origin.pdf +3 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/full.md +480 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/images.zip +3 -0
- aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/layout.json +3 -0
- acontinuoustimeframeworkfordiscretedenoisingmodels/bffa17a7-7231-490e-b95b-b193fd9d4377_content_list.json +3 -0
- acontinuoustimeframeworkfordiscretedenoisingmodels/bffa17a7-7231-490e-b95b-b193fd9d4377_model.json +3 -0
acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66f8db3a430c15f4d67cba63cdfec5318246dbe2028970478374e8e11da94451
+size 93539
acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4c0b21ed1f48fafdf2e30ffb5f6396be09893b3d114d334cfed26cae40c0da5
+size 114856
acloserlookattheadversarialrobustnessofdeepequilibriummodels/1c4d0023-75c5-417a-a020-3170c9fa8df7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c73786fa95819bf2dd7b1be4f8c5e1b36383a8a01ca71c41fcb2907130487655
+size 1843414
acloserlookattheadversarialrobustnessofdeepequilibriummodels/full.md
ADDED
@@ -0,0 +1,400 @@
# A Closer Look at the Adversarial Robustness of Deep Equilibrium Models

Zonghan Yang$^{1}$, Tianyu Pang$^{2}$, Yang Liu$^{1,3,4*}$

$^{1}$ Department of Computer Science and Technology, Tsinghua University, Beijing, China

$^{2}$ Sea AI Lab, Singapore

$^{3}$ Institute for AI Industry Research (AIR), Tsinghua University, Beijing, China

$^{4}$ Beijing Academy of Artificial Intelligence, Beijing, China

yangzh20@mails.tsinghua.edu.cn, tianyupang@sea.com, liuyang2011@tsinghua.edu.cn

# Abstract

Deep equilibrium models (DEQs) refrain from the traditional layer-stacking paradigm and instead find the fixed point of a single layer. DEQs have achieved promising performance on different applications with notable memory efficiency. At the same time, the adversarial vulnerability of DEQs raises concerns. Several works propose to certify robustness for monotone DEQs. However, limited efforts are devoted to studying the empirical robustness of general DEQs. To this end, we observe that an adversarially trained DEQ requires more forward steps to arrive at the equilibrium state, or even violates its fixed-point structure. Besides, the forward and backward tracks of DEQs are misaligned due to the black-box solvers. These facts cause gradient obfuscation when applying ready-made attacks to evaluate or adversarially train DEQs. Given this, we develop approaches to estimate the intermediate gradients of DEQs and integrate them into the attacking pipelines. Our approaches facilitate fully white-box evaluations and lead to effective adversarial defense for DEQs. Extensive experiments on CIFAR-10 validate that the adversarial robustness of DEQs is competitive with that of deep networks of similar sizes.

# 1 Introduction

Conventional deep networks employ multiple stacked layers to process data in a feedforward manner [17]. During training, network parameters are optimized by backpropagating loss updates through the consecutive layers [36]. Recently, [3] propose deep equilibrium models (DEQs), whose forward pass involves finding the fixed point (i.e., equilibrium state) of a single layer. With implicit differentiation, the backward pass of DEQs is formulated as another linear fixed-point system. Training DEQs with black-box root solvers only consumes $\mathcal{O}(1)$ memory, which enables DEQs to achieve performance competitive with conventional networks in large-scale applications, including language modelling [3], image classification and segmentation [4], density modelling [24, 16], and graph modelling [23].

Considering the fixed point as a local attractor, DEQs are expected to be stable to small input perturbations. However, empirical observations show the opposite: a vanilla DEQ is also vulnerable to adversarial attacks [16]. Along this line, several works investigate the certified robustness of monotone DEQs [40, 34, 27, 20, 28, 10]. Inspired by monotone operator splitting theory, monotone DEQs are designed with guarantees on the existence and convergence of equilibrium points. However, the layer parameterization of monotone DEQs and the limited scalability of certification methods narrow the scope of these previous studies. On the other hand, [16] explore the adversarial robustness of general DEQs. They incorporate the adversarial generation process into the equilibrium solver to accelerate the PGD attack [25]. Nevertheless, the PGD attack is originally designed for deep networks and requires end-to-end white-box differentiation. In contrast, DEQs rely on black-box solvers and could obfuscate the gradients used in PGD: as shown in Fig. 2-(a), in DEQs trained with different configurations, the intermediate states always exhibit higher robustness than the final state under ready-made PGD attacks. Compared to the extensive literature on the adversarial robustness of deep networks [6, 38, 15, 26, 22, 25, 42, 35, 29], much less is known about the adversarial robustness of general DEQs, especially under a carefully designed white-box setting. This motivates us to disentangle the modules in DEQs and provide a fair evaluation of their robustness.
In this paper, we first summarize the challenges of training robust DEQs (see Sec. 3), including (i) convergence of the black-box solvers and (ii) misalignment between the forward and backward passes. The off-the-shelf attacks work in a gray-box setting as they have no access to the intermediate states in the forward pass. To thoroughly evaluate the robustness, we propose two methods for intermediate gradient estimation: the first one is iterating adjoint gradient estimations simultaneously in the forward pass, as formally described in Sec. 4.1; the second one is estimating intermediate gradients by unrolling, as seen in Sec. 4.2. Then in Sec. 5, we develop approaches to integrate the estimated gradients into the ready-made attacks towards fully white-box adversaries. We also design defense strategies for DEQs to boost their robustness under white-box attacks.
We use PGD-AT to train large-sized and XL-sized DEQs on CIFAR-10. To benchmark their robustness [12], the parameter sizes of the DEQs are set to be comparable with ResNet-18 [18] and WideResNet-34-10 [41], respectively. We observe that the adversarially trained DEQs with the exact gradient [3] require more forward steps to arrive at the equilibrium state, or even violate their fixed-point structures. We also find an intriguing robustness accumulation effect that the intermediate states in the forward pass are more robust under ready-made attacks. These phenomena exhibit gradient obfuscation [2], which verifies the necessity of intermediate gradient estimation to construct white-box attacks and defense strategies. Robustness performance under the white-box evaluation shows that DEQs achieve competitive or stronger adversarial robustness than deep networks of similar parameter amounts. Our investigation sheds light on the pros and cons with respect to the adversarial robustness of DEQs.

# 2 Background

This section includes the background on DEQs and adversarial robustness for deep networks.

# 2.1 Deep equilibrium models

We first briefly introduce the modelling of deep equilibrium models (DEQs) [3, 4]. Consider a $T$ -layer weight-tied input-injected neural network:

$$
\mathbf{z}_{n} = f_{\theta}\left(\mathbf{z}_{n-1}; \mathbf{x}\right), \quad n = 1, \dots, T, \tag{1}
$$

where $\mathbf{x} \in \mathbb{R}^l$ is the input, $\mathbf{z}_n \in \mathbb{R}^d$ is the output of the $n$ -th layer, and $\theta$ is the network weights shared across different layers. One can cast the evolution of $\{\mathbf{z}_n\}$ as a fixed-point iteration process. When $n \to \infty$ , $\mathbf{z}_n$ converges to the fixed point $\mathbf{z}^*$ which satisfies the equation $\mathbf{z}^* = f_\theta (\mathbf{z}^*;\mathbf{x})$ .
Deep equilibrium models rely on the fixed-point equation and leverage a black-box solver to directly solve for $\mathbf{z}^*$ in the forward pass. The backward pass of DEQs can also be formulated as a fixed-point iteration process. With the loss function $L(\mathbf{z}^*, y)$ and implicit differentiation, we can compute the gradient with respect to $\theta$ or $\mathbf{x}$ with

$$
\frac{\partial L}{\partial (\cdot)} = \left(\frac{\partial f_{\theta}\left(\mathbf{z}^{*}; \mathbf{x}\right)}{\partial (\cdot)}\right) \underbrace{\left(I - \frac{\partial f_{\theta}\left(\mathbf{z}^{*}; \mathbf{x}\right)}{\partial \mathbf{z}}\right)^{-1} \frac{\partial L\left(\mathbf{z}^{*}, y\right)}{\partial \mathbf{z}}}_{\mathbf{u}^{*}}, \tag{2}
$$

where $(\partial \mathbf{a} / \partial \mathbf{b})_{ij} = \partial \mathbf{a}_j / \partial \mathbf{b}_i$ and $\mathbf{u}^*$ satisfies

$$
\mathbf{u}^{*} = \left(\frac{\partial f_{\theta}\left(\mathbf{z}^{*}; \mathbf{x}\right)}{\partial \mathbf{z}}\right) \mathbf{u}^{*} + \frac{\partial L\left(\mathbf{z}^{*}, y\right)}{\partial \mathbf{z}}. \tag{3}
$$

According to Eq. (3), the backward pass can also be executed with a black-box fixed-point solver, and this iteration process is independent of that in the forward pass.
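
To make the two passes concrete, the following is a minimal PyTorch-style sketch (ours, not the authors' implementation) of a DEQ layer: the forward fixed point of Eq. (1) is found by plain iteration, and the linear system of Eq. (3) is solved by fixed-point iteration inside a backward hook so that autograd assembles Eq. (2). The callable `f_theta`, the zero initialization, and the iteration counts are assumptions for illustration.

```python
import torch

def deq_forward_backward(f_theta, x, z_dim, fwd_iters=30, bwd_iters=30):
    """Sketch of a DEQ layer: forward fixed point (Eq. 1) and implicit backward (Eqs. 2-3)."""
    # Forward pass: iterate z <- f_theta(z; x) without tracking gradients.
    with torch.no_grad():
        z = torch.zeros(z_dim)
        for _ in range(fwd_iters):
            z = f_theta(z, x)
    # Re-attach the equilibrium to the autograd graph with one extra evaluation.
    z = z.detach().requires_grad_()
    f_z = f_theta(z, x)

    def backward_hook(grad):
        # Solve u = (df/dz) u + dL/dz (Eq. 3) by fixed-point iteration;
        # returning u lets autograd form Eq. (2) through f_z.
        u = torch.zeros_like(grad)
        for _ in range(bwd_iters):
            u = torch.autograd.grad(f_z, z, u, retain_graph=True)[0] + grad
        return u

    f_z.register_hook(backward_hook)
    return f_z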

Figure 1: The gradients proposed for DEQs. (a): the exact gradient [3] solved by an independent fixed-point iteration process. (b): the unrolling-based phantom gradient [14] returned by automatic differentiation on a computational subgraph where the equilibrium state $\mathbf{z}^*$ is unrolled. (c): the simultaneous adjoint process along with the forward iterations described in Sec. 4.1. (d): unrolling the intermediate states $\mathbf{z}_n$ for gradient estimation in Sec. 4.2. We leverage (c) and (d) to estimate intermediate gradients and design fully white-box attacks to evaluate the robustness of DEQs.

Several techniques have been proposed to improve the training stability of DEQs. [5] propose to regularize the Jacobian matrix in Eq. (2) during training so that the nonlinear forward system and the backward linear system enjoy appropriate contractivity. [14] propose unrolling-based and Neumann-series-based phantom gradients to replace the exact gradient in Eq. (2) for acceleration. The unrolling-based phantom gradient is defined as

$$
\lambda \sum_{t=1}^{k} \left(\frac{\partial f_{\theta}(\hat{\mathbf{z}}_{N+t}; \mathbf{x})}{\partial (\cdot)}\right) \mathbf{P}_{\lambda, \mathbf{z}_{N}}^{(t)} \frac{\partial L(\hat{\mathbf{z}}_{N+k}, y)}{\partial \mathbf{z}}, \tag{4}
$$

where

$$
\mathbf{P}_{\lambda, \mathbf{z}_{N}}^{(t)} = \prod_{s=t+1}^{k} \left(\lambda \frac{\partial f_{\theta}(\hat{\mathbf{z}}_{N+s}; \mathbf{x})}{\partial \mathbf{z}} + (1 - \lambda) I\right), \tag{5}
$$

$$
\hat{\mathbf{z}}_{N+t} = (1 - \lambda) \hat{\mathbf{z}}_{N+t-1} + \lambda f_{\theta}(\hat{\mathbf{z}}_{N+t-1}; \mathbf{x}) \tag{6}
$$

are the $k$ unrolling steps with $1 \leq t \leq k$ , starting from $\hat{\mathbf{z}}_N = \mathbf{z}^*$ returned by the forward solver.

Eq. (4) is calculated by the automatic differentiation framework [30] on the computational subgraph in Eq. (6). It has been demonstrated that the unrolling-based phantom gradient imposes an implicit Jacobian regularization effect on DEQ training [14]. DEQs trained with either the exact or the phantom gradient are competitive with conventional deep networks in terms of natural accuracy. In our work, we leverage adversarial defense strategies to train DEQs and improve their robustness.
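
A minimal sketch of the unrolling-based phantom gradient of Eqs. (4)-(6), under the assumption that `z_star` is the output of the black-box forward solver; differentiating a loss of the returned state realizes Eq. (4) via automatic differentiation. The function name and defaults are illustrative.

```python
import torch

def phantom_gradient_output(f_theta, x, z_star, k=5, lam=0.5):
    """Unrolling-based phantom gradient (Eqs. 4-6): differentiate through k damped
    steps that start from the (detached) equilibrium returned by the forward solver."""
    z = z_star.detach()               # \hat{z}_N = z* from the black-box solver
    for _ in range(k):                # Eq. (6): damped unrolling kept on the autograd tape
        z = (1 - lam) * z + lam * f_theta(z, x)
    return z                          # backprop through this output realizes Eq. (4)
```

Calling `loss(z, y).backward()` on the returned state then yields the phantom gradient with respect to both the input and the parameters.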

# 2.2 Adversarial robustness for deep networks

Much research has been dedicated to adversarial attacks and defenses of deep neural networks. On the one hand, white-box adversarial attack techniques like PGD [25] construct adversaries by iteratively perturbing inputs in the gradient ascent direction. The robustness of deep networks is benchmarked by AutoAttack [12], which consists of four attacks including two PGD variants with adaptive stepsize and the query-based SQUARE attack [1]. On the other hand, adversarial training (AT) [25] is one of the most effective defense strategies. By early stopping the training procedure as in [35], the primary PGD-AT framework still achieves competitive robustness performance compared with the state-of-the-art defense techniques like TRADES [42]. It is worth mentioning that many defense approaches claim robustness improvement by obfuscating gradients, which proves to be a false sense of security under adaptive attacks designed specifically [2]. In our work, we train DEQs with PGD-AT and investigate their adversarial robustness by designing customized defenses and adaptive attacks.

# 3 Challenges for robust general DEQs

This section describes the challenges encountered when we aim to train robust general DEQs.

Misalignment between forward & backward passes. The central idea of DEQs is directly solving for the equilibrium state $\mathbf{z}^*$ and differentiating through the fixed-point equation $\mathbf{z}^* = f_{\theta}(\mathbf{z}^*;\mathbf{x})$ for efficient forward and backward passes. Fig. 1-(a) sketches the calculation of the exact gradient [3]. Independent from the forward iterations (the blue curve), the exact gradient is acquired by solving a linear fixed-point system that only depends on the equilibrium state $\mathbf{z}^*$ (the orange curve). Fig. 1-(b) shows the calculation of the unrolling-based phantom gradient [14]. $\mathbf{z}^*$ , as the final state in the forward pass, is unrolled (the gray iteration), and the gradient is obtained from automatic differentiation on the loss function. However, when iterating the gradient computations, the intermediate states $\{\mathbf{z}_n\}$ in the forward pass are bypassed by both methods. The misalignment between the forward and backward tracks results in a gray-box setting for the ready-made attacks.
Convergence of the black-box solvers. In contrast with monotone DEQs, there is no guarantee for the existence and convergence of the equilibrium states in general DEQs. It is thus unknown whether the black-box solvers in DEQs still converge to equilibrium states under input perturbations. Adversarial training also adds the concern on equilibrium convergence. The well-known effect of adversarial training for deep networks is the trade-off between robustness and accuracy [37, 39, 42, 32]. A similar drop in standard accuracy (from $78\%$ to $55\%$ ) is also observed for tiny-sized adversarially-trained DEQs [16]. The robustness-accuracy trade-off brings training instability for general DEQs, which may take more iterations in the solvers for equilibrium convergence, or even violate their fixed-point structures. Finally, the robustness comparison is still under-explored between large-sized general DEQs and deep networks with similar parameter counts.

# 4 On intermediate gradient estimation

As the forward and backward tracks in DEQs are misaligned, the intermediate states in the forward pass are inaccessible to off-the-shelf attacks, which causes gradient obfuscation and results in false positive robustness. Therefore, it is necessary to estimate the intermediate gradients. With the integration of the estimated gradients, the attacks can validate the robustness of DEQs in a fully white-box setting. In this section, we propose two methods for intermediate gradient estimation.

# 4.1 Simultaneous adjoint in the forward pass

Inspired by the adjoint process in neural ODE models [9], we propose the adjoint process for intermediate gradient estimation in DEQs. The adjoint process in neural ODE models is characterized by an adjoint ODE [31]. For DEQs, we propose to iterate the updates of adjoint states subject to $\mathbf{z}_n$ in the forward pass. We investigate the simultaneous adjoint with Broyden's method [7] as the forward solver. In the forward pass, Broyden's method updates the intermediate state $\mathbf{z}_n$ based on the residual $g_{\theta}(\mathbf{z}_n;\mathbf{x}) = f_{\theta}(\mathbf{z}_n;\mathbf{x}) - \mathbf{z}_n$ and $B_{n}$ , the low-rank approximation of the Jacobian inverse:

$$
\mathbf{z}_{n+1} = \mathbf{z}_{n} - \alpha B_{n} g_{\theta}(\mathbf{z}_{n}; \mathbf{x}), \quad \mathbf{z}_{0} = \mathbf{0}, \tag{7}
$$

$$
B_{n+1} = B_{n} + \frac{\Delta \mathbf{z}_{n+1} - B_{n} \Delta g_{n+1}}{\Delta \mathbf{z}_{n+1}^{\mathrm{T}} B_{n} \Delta g_{n+1}} \Delta \mathbf{z}_{n+1}^{\mathrm{T}} B_{n}, \tag{8}
$$

where $0 \leq n \leq N - 1$ , $B_0 = -I$ , $\Delta \mathbf{z}_{n+1} = \mathbf{z}_{n+1} - \mathbf{z}_n$ , $\Delta g_{n+1} = g_{\theta}(\mathbf{z}_{n+1};\mathbf{x}) - g_{\theta}(\mathbf{z}_n;\mathbf{x})$ , and $\alpha$ is the step size. To maintain a simultaneous adjoint, we start from $\mathbf{u}_0 = \mathbf{0}$ and use Broyden's method to solve Eq. (3). Similar to the residual function $g_{\theta}(\cdot;\mathbf{x})$ for $\mathbf{z}_n$ , the fixed-point equation in Eq. (3) defines the residual of the adjoint state. However, we propose to replace the $\mathbf{z}^*$ in Eq. (3) with $\mathbf{z}_n$ , and to integrate the approximated Jacobian inverse $B_n$ to force the alignment of the adjoint state update:

$$
\mathbf{v}_{n} = \left(\frac{\partial f_{\theta}(\mathbf{z}_{n}; \mathbf{x})}{\partial \mathbf{z}}\right) \mathbf{u}_{n} + \frac{\partial L(\mathbf{z}_{n}, y)}{\partial \mathbf{z}} - \mathbf{u}_{n}, \tag{9}
$$

$$
\mathbf{u}_{n+1} = \mathbf{u}_{n} - \beta B_{n} \mathbf{v}_{n}, \tag{10}
$$

where $\mathbf{v}_n$ is the residual at iteration $n$ , $\mathbf{u}_n$ is the updated adjoint state, and $\beta > 0$ is the step size.
We use the following surrogate gradients to construct attacks on the intermediate state $\mathbf{z}_n$ :

$$
\left[\frac{\partial L}{\partial \mathbf{x}}\right]_{n} = \left(\frac{\partial f_{\theta}(\mathbf{z}_{n}; \mathbf{x})}{\partial \mathbf{x}}\right) \mathbf{u}_{n}. \tag{11}
$$

An illustration of the simultaneous adjoint process is shown in Fig. 1-(c). In the following, we refer to this method as simultaneous adjoint when constructing intermediate-state attacks in Sec. 5.1.
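
The following sketch (an illustration under our assumptions, not the released implementation) runs Broyden iterations for $\mathbf{z}_n$ (Eqs. 7-8) while simultaneously updating the adjoint state (Eqs. 9-10) and collecting the surrogate input gradients of Eq. (11). For clarity, `f_theta` maps flattened states, `loss_fn` maps a state to the scalar loss $L(\mathbf{z}, y)$, and $B_n$ is stored as a dense matrix rather than in the low-rank form used in practice.

```python
import torch

def broyden_with_adjoint(f_theta, x, loss_fn, d, N=20, alpha=1.0, beta=0.5):
    """Broyden forward iterations (Eqs. 7-8) with simultaneous adjoint updates (Eqs. 9-10)
    and the surrogate input gradient of Eq. (11) collected at every step."""
    z = torch.zeros(d)
    u = torch.zeros(d)
    B = -torch.eye(d)                          # B_0 = -I
    input_grads = []

    def g(z_):                                 # residual g_theta(z; x) = f_theta(z; x) - z
        return f_theta(z_, x) - z_

    g_z = g(z)
    for _ in range(N):
        # --- adjoint residual v_n (Eq. 9) and update u_{n+1} (Eq. 10) ---
        z_req = z.detach().requires_grad_()
        f_val = f_theta(z_req, x)
        dL_dz = torch.autograd.grad(loss_fn(z_req), z_req, retain_graph=True)[0]
        Jt_u = torch.autograd.grad(f_val, z_req, u, retain_graph=True)[0]
        v = Jt_u + dL_dz - u
        # surrogate input gradient at this iterate (Eq. 11)
        x_req = x.detach().requires_grad_()
        f_val_x = f_theta(z.detach(), x_req)
        input_grads.append(torch.autograd.grad(f_val_x, x_req, u)[0])
        u = u - beta * (B @ v)
        # --- Broyden step for z (Eq. 7) and rank-one update of B (Eq. 8) ---
        z_new = z - alpha * (B @ g_z)
        g_new = g(z_new)
        dz, dg = z_new - z, g_new - g_z
        B = B + torch.outer(dz - B @ dg, dz @ B) / (dz @ (B @ dg))
        z, g_z = z_new, g_new
    return z, u, input_grads
```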
Remark 4.1. We show in Appendix B that under mild assumptions, the $\{\mathbf{u}_n\}$ converges to $\mathbf{u}^*$ when $0 < \beta < 1$ in Eq. (10). However in practice, we do not require the convergence of $\mathbf{u}_n$ as we only use them in Eq. (11) as gradient estimations to construct intermediate attacks.

Remark 4.2. Similar to the update of $\mathbf{u}_n$ in our approach, [16] propose augmented DEQs, which integrate the iterative updating processes of $\mathbf{z}$ , $\mathbf{u}$ , and $\mathbf{x}$ as a whole:

$$
F\left(\left[\begin{array}{l} \mathbf{z}_{n} \\ \mathbf{u}_{n} \\ \mathbf{x}_{n} \end{array}\right]\right) = \left[\begin{array}{c} f_{\theta}(\mathbf{z}_{n}; \mathbf{x}_{n}) \\ \left(\frac{\partial f_{\theta}(\mathbf{z}_{n}; \mathbf{x}_{n})}{\partial \mathbf{z}}\right) \mathbf{u}_{n} + \frac{\partial L(\mathbf{z}_{n}, y)}{\partial \mathbf{z}} \\ \mathbf{x}_{n} - \left(\frac{\partial f_{\theta}(\mathbf{z}_{n}; \mathbf{x}_{n})}{\partial \mathbf{x}}\right) \mathbf{u}_{n} \end{array}\right]. \tag{12}
$$

The augmented DEQs leverage a black-box solver (e.g., Broyden's method) to find the equilibrium of the whole state $\left[\mathbf{z}^{*},\mathbf{u}^{*},\mathbf{x}^{*}\right]^{\mathrm{T}}$ . However, several cross-terms exist in the joint Jacobian due to the coupling of the three iteration processes, which further hinders the convergence. In contrast, the simultaneous adjoint update in Eq. (10) does not include the update of $\mathbf{x}$ . Furthermore, we reuse the Jacobian inverse approximation matrix $B_{n}$ in the update of $\mathbf{u}_n$ , which is easy to implement. Because of the disentanglement of $\mathbf{z}_n$ and $\mathbf{u}_n$ in the joint Jacobian, our method also enjoys better efficiency and flexibility as one can early exit the adjoint process without affecting the updates of $\mathbf{z}_n$ 's.
Remark 4.3. Concurrent work [33] also explores the idea of sharing approximated Jacobian inverse $B_{n}$ in bi-level optimization problems. While their motivation is to accelerate DEQ training, we use the adjoint states to construct gradient estimation and facilitate white-box attacks. We also compare our work with theirs in terms of intermediate state attacks; See Appendix D for details.

# 4.2 Unrolling the intermediate states

We also propose to estimate the gradient at the state $\mathbf{z}_n$ by unrolling. Depicted in Fig. 1-(d), $\mathbf{z}_n$ is involved in an artificially constructed computational graph. We can thus estimate the intermediate gradient by backpropagation with automatic differentiation. Formally, applying Eq. (4) to $\mathbf{z}_n$ yields

$$
\left[\frac{\partial L}{\partial \mathbf{x}}\right]_{n}^{(k)} = \mathbf{A}_{\lambda, \mathbf{z}_{n}}^{(k)} \frac{\partial L(\hat{\mathbf{z}}_{n+k}, y)}{\partial \mathbf{z}}, \tag{13}
$$

where

$$
\mathbf{A}_{\lambda, \mathbf{z}_{n}}^{(k)} = \lambda \sum_{t=0}^{k-1} \left(\frac{\partial f_{\theta}(\hat{\mathbf{z}}_{n+t}; \mathbf{x})}{\partial \mathbf{x}}\right) \mathbf{P}_{\lambda, \mathbf{z}_{n}}^{(t)}, \tag{14}
$$

$$
\mathbf{P}_{\lambda, \mathbf{z}_{n}}^{(t)} = \prod_{s=t+1}^{k-1} \left(\lambda \frac{\partial f_{\theta}(\hat{\mathbf{z}}_{n+s}; \mathbf{x})}{\partial \mathbf{z}} + (1 - \lambda) I\right), \tag{15}
$$

and the state sequence $\hat{\mathbf{z}}_n, \hat{\mathbf{z}}_{n+1}, \dots, \hat{\mathbf{z}}_{n+k}$ represents the damped unrolling iteration:

$$
\hat{\mathbf{z}}_{n+t} = (1 - \lambda) \hat{\mathbf{z}}_{n+t-1} + \lambda f_{\theta}(\hat{\mathbf{z}}_{n+t-1}; \mathbf{x}), \tag{16}
$$

with $t = 1,2,\dots ,k$ and $\hat{\mathbf{z}}_n = \mathbf{z}_n$ . While Eq. (4) is proposed as an approximation of the exact gradient, we unroll the states $\mathbf{z}_n$ for intermediate gradient estimation. Similar to the case of Eq. (11), we use Eq. (13) as estimation to design intermediate attacks for DEQs. We refer to this method as unrolled intermediates in the following when incorporating Eq. (13) into the white-box attacks.
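
A sketch of the unrolled-intermediates estimator: assuming an intermediate state `z_n` has been recorded from the forward solver, Eq. (16) is unrolled for $k$ damped steps and Eq. (13) is obtained by backpropagating the loss at the unrolled state to the input. Names and defaults are ours.

```python
import torch

def unrolled_intermediate_grad(f_theta, x, z_n, loss_fn, k=1, lam=1.0):
    """Estimate [dL/dx]_n^(k) (Eq. 13) by unrolling k damped steps (Eq. 16)
    from an intermediate state z_n and backpropagating to the input."""
    x = x.detach().requires_grad_()
    z = z_n.detach()
    for _ in range(k):                # Eq. (16), starting from \hat{z}_n = z_n
        z = (1 - lam) * z + lam * f_theta(z, x)
    loss = loss_fn(z)                 # L(\hat{z}_{n+k}, y)
    return torch.autograd.grad(loss, x)[0]
```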

# 5 White-box attacks and defenses for DEQs

This section describes different types of white-box attacks and defense strategies for DEQs.

# 5.1 White-box attacks for DEQs

The existing attacks leverage the gradients calculated at the final state outputted by the forward solver. Based on the surrogate intermediate gradients in Eq. (11) or Eq. (13), we can involve the $\mathbf{z}_n$ in the forward pass into the construction of adversaries. A direct white-box approach is to use the estimated gradient at an early state $\mathbf{z}_n$ as an alternative for input perturbations. Another simple yet effective method is to average the intermediate gradients as the gradient ensemble for attacks. For example, the average of all intermediate gradients along the simultaneous adjoint process is given by

$$
\sum_{n} \left[\frac{\partial L}{\partial \mathbf{x}}\right]_{n} = \sum_{n} \left(\frac{\partial f_{\theta}(\mathbf{z}_{n}; \mathbf{x})}{\partial \mathbf{x}}\right) \mathbf{u}_{n}. \tag{17}
$$

The gradient ensemble can be viewed as the fusion of all perturbation directions indicated by all $\mathbf{z}_n$ 's.
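
A sketch of how the estimated gradients plug into a ready-made attack: a standard $\ell_\infty$ PGD loop where the usual end-to-end gradient is swapped for any surrogate, e.g. a single intermediate estimate or the gradient ensemble of Eq. (17). The helper `grad_fn` is a placeholder for one of the estimators above.

```python
import torch

def pgd_with_surrogate_grad(x, grad_fn, eps=8/255, alpha=2/255, steps=10):
    """l_inf PGD where grad_fn(x_adv) returns a surrogate input gradient,
    e.g. the gradient ensemble of Eq. (17) or a single intermediate estimate."""
    x_adv = (x + torch.empty_like(x).uniform_(-eps, eps)).clamp(0, 1)
    for _ in range(steps):
        g = grad_fn(x_adv)                                    # surrogate dL/dx
        x_adv = x_adv + alpha * g.sign()                      # ascent step
        x_adv = torch.min(torch.max(x_adv, x - eps), x + eps).clamp(0, 1)
    return x_adv.detach()
```

For instance, `grad_fn` could average the `input_grads` list produced by the simultaneous-adjoint sketch in Sec. 4.1.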

# 5.2 Defenses with intermediate states

In addition to the final state $\mathbf{z}^*$ , the unused intermediate states can be leveraged as well for the defenses of DEQs. A simple yet effective defense strategy is to early exit the forward solver during inference. We can evaluate the robustness of DEQs with the early state $\mathbf{z}_n$ in the forward pass, as $\mathbf{z}_n$ and $\mathbf{z}^*$ have the same shape. We determine the optimal timing for early exit by selecting the top robustness performance of all $\mathbf{z}_n$ 's on the development set under the ready-made PGD-10 attack.

Figure 2: Challenges in benchmarking the adversarial robustness of DEQs. (a) Gradient obfuscation issues arise in the DEQs trained with different configurations. With different iteration settings in the DEQ solver or different gradient formulations, the intermediate states always exhibit higher robustness than the final state under the ready-made PGD-10 attack. (b) The exact-trained DEQ with small iteration settings violates the fixed-point structure and requires more iterations to retain it (analyzed in Sec. 6.1). Both observations motivate us to design adaptive attacks for white-box robustness evaluation of DEQs.
The input-injected neural network provides an interpretation for the early-state defense. From Eq. (1), the distortion of $\mathbf{z}_n$ comes from both the perturbed $\mathbf{z}_{n - 1}$ and the biased transformation $f_{\theta}(\cdot ;\mathbf{x} + \Delta \mathbf{x})$ . By early exiting the forward process, one obtains a less distorted intermediate state.

Another defense strategy for DEQs is leveraging the ensemble of intermediate states. Similar to Eq. (17), we average the intermediate states $\{\mathbf{z}_n\}$ to defend against attacks. Instead of early stopping, the intermediate state ensemble exploits the state representations at all iterations in the forward solver.

While the proposed defense techniques leverage the intermediate states, they still require only $\mathcal{O}(1)$ memory. For the early-state defense, we determine the optimal time to early exit the solver on the development set offline once, and then fix the early-exit step during testing. For the ensemble-state defense, we maintain an accumulator that sums up $\{\mathbf{z}_n\}$ along the forward pass without storing them.
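
Both defenses fit into the forward solver with $\mathcal{O}(1)$ extra memory, as sketched below (illustrative only, with a plain fixed-point iteration standing in for the black-box solver): the early-state defense returns the iterate at a fixed, pre-selected step, and the ensemble-state defense keeps a running sum of the iterates.

```python
import torch

def solve_with_defenses(f_theta, x, z_dim, iters=8, early_exit=None):
    """Forward pass supporting the early-state and ensemble-state defenses
    with O(1) extra memory: only a running sum of the iterates is kept."""
    with torch.no_grad():
        z = torch.zeros(z_dim)
        acc = torch.zeros(z_dim)
        for n in range(1, iters + 1):
            z = f_theta(z, x)
            acc += z                     # running sum for the ensemble-state defense
            if early_exit is not None and n == early_exit:
                return z                 # early-state defense: stop at a fixed step
        return acc / iters               # ensemble-state defense: averaged state
```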

# 6 Experiments

Following the settings in [5], we experiment with the large-sized DEQ with its parameter count similar to ResNet-18 [17]. We also experiment with an XL-sized DEQ with its parameter count similar to WideResNet-34-10 [41] to enable a fair comparison with the empirical robustness of the deep networks. The detailed experimental settings are listed in Appendix A. We first train DEQs on CIFAR-10 [21] with the PGD-AT framework [25], then test the adaptive attacks and defense strategies proposed in Sec. 5 on the adversarially-trained DEQs. We refer to a DEQ as "exact-trained" when using the exact gradient, and "unrolling-trained" when using the unrolling-based phantom gradient in the PGD-AT framework to generate adversaries and optimize for model parameters. Unless specified, all DEQs are adversarially trained in this paper. During training, we use 10-step PGD with the step size of $2/255$ to generate adversaries within the range of $\ell_{\infty} = 8/255$ . For the specific type of attacks, we use PGD and AutoAttack (AA) [13] to instantiate the white-box attacks in Sec. 5.1.

# 6.1 The retention of the fixed-point structure

Table 2: Performance (\%) of the unrolling-trained DEQ-Large with the small (8/7) iteration setting and the exact-trained DEQ-Large with the large (50/50) iteration setting under PGD-10. The "final" rows and columns represent the original DEQ output and the ready-made attacks at the final state. The "early" rows indicate early state defense, and the "intermediate" columns indicate the performance of the strongest intermediate attacks. The rows and the columns of "ensemble" demonstrate the ensemble defense and the white-box attacks based on gradient ensemble. Under the (underlined) strongest attacks, the ensemble defense achieves the best robustness performance (in bold).
<table><tr><td rowspan="2">Training Configurations</td><td rowspan="2">Defense</td><td rowspan="2">Clean</td><td colspan="3">Simultaneous Adjoint</td><td colspan="3">Unrolled Intermediates</td></tr><tr><td>Final</td><td>Intermediate</td><td>Ensemble</td><td>Final</td><td>Intermediate</td><td>Ensemble</td></tr><tr><td rowspan="3">(8/7) Unrolling-Trained</td><td>Final</td><td>78.03</td><td>49.81</td><td>59.49</td><td>54.91</td><td>42.67</td><td>62.24</td><td>51.52</td></tr><tr><td>Early</td><td>79.57</td><td>54.90</td><td>39.19</td><td>42.76</td><td>51.90</td><td>29.38</td><td>34.20</td></tr><tr><td>Ensemble</td><td>79.67</td><td>51.52</td><td>52.43</td><td>49.47</td><td>49.02</td><td>55.10</td><td>47.12</td></tr><tr><td rowspan="3">(50/50) Exact-Trained</td><td>Final</td><td>73.51</td><td>37.77</td><td>70.52</td><td>43.70</td><td>36.70</td><td>69.29</td><td>48.08</td></tr><tr><td>Early</td><td>86.98</td><td>75.25</td><td>12.44</td><td>40.12</td><td>73.93</td><td>18.24</td><td>26.22</td></tr><tr><td>Ensemble</td><td>75.12</td><td>40.20</td><td>72.41</td><td>45.06</td><td>39.18</td><td>68.83</td><td>49.10</td></tr></table>

We start with an observation on the fixed-point structure. As shown in Fig. 2-(b), the lines illustrate the relative error $\|f_{\theta}(\mathbf{z}_n;\mathbf{x}) - \mathbf{z}_n\|_2 / \|f_{\theta}(\mathbf{z}_n;\mathbf{x})\|_2$ for each $\mathbf{z}_n$ . We find that for the exact-trained DEQ with small iteration settings (8 forward / 7 backward iterations), all the relative errors are larger than 0.75, i.e., the forward solver in the DEQ fails to converge to an equilibrium. This phenomenon reflects the challenge on the convergence of the black-box solvers in DEQs mentioned in Section 3. A larger iteration setting (18 forward / 20 backward iterations) is required for exact-trained DEQs to retain the fixed-point structure. In contrast, we find that the small iteration setting (8/7) is enough for the unrolling-trained DEQ to retain the fixed-point structure.
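
The relative error used here is straightforward to monitor; a small sketch (assuming the solver's iterates have been recorded in a list) is given below.

```python
import torch

def relative_residuals(f_theta, x, states):
    """Relative errors ||f(z_n; x) - z_n||_2 / ||f(z_n; x)||_2, as plotted in Fig. 2-(b),
    used to check whether the solver actually reaches a fixed point."""
    errs = []
    with torch.no_grad():
        for z_n in states:
            f_z = f_theta(z_n, x)
            errs.append(((f_z - z_n).norm() / f_z.norm()).item())
    return errs
```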

It is necessary to retain the fixed-point structure; otherwise, gradient obfuscation issues arise. As the exact gradient in Eq. (2) is derived from implicit differentiation on the fixed-point equation $\mathbf{z}^{*} = f_{\theta}(\mathbf{z}^{*};\mathbf{x})$ , it becomes inexact when the equilibrium point $\mathbf{z}^*$ is not reached. Table 1 shows the empirical performance of the exact-trained DEQ under the small (8/7) iteration setting. The severe performance degradation under alternative gradient formulations as well as the SQUARE attack also indicates gradient obfuscation, as suggested in [2] and [8].
Table 1: Performance $(\%)$ of the exact-trained DEQ-Large with the small (8/7) solver iterations under different attacks. The high accuracy under PGD-10 with the exact gradient is deteriorated using the unrolling-based phantom gradient. Leveraging the query-based SQUARE leads to even lower accuracy. These observations indicate that the DEQ with violated fixed-point structure suffers severe robustness degradation.
<table><tr><td>Gradient</td><td>Clean</td><td>PGD-10</td><td>PGD-1000</td><td>SQUARE</td></tr><tr><td>Exact</td><td rowspan="2">78.24</td><td>79.97</td><td>80.10</td><td rowspan="2">5.95</td></tr><tr><td>Unrolling</td><td>37.07</td><td>36.39</td></tr></table>

While large iteration settings for the exact-trained DEQs keep the fixed-point structure, they inevitably slow down the training speed (detailed in Appendix E.3). For the exact-trained DEQs with the small (8/7) iteration setting, we have also tried varied Jacobian regularization weights to impose stricter Lipschitz constraints during training, but found that the DEQ solver still diverged. We have also analyzed the instability by tracing the variation of the Lipschitz constant during the adversarial training of DEQs; see Appendix E.4 for details. By comparison, the unrolling-trained DEQ requires fewer iterations in the forward solver to converge. According to the green line in Fig. 2-(b), the relative errors become lower and reach 0.04 at the final state. The results coincide with [14] in that the unrolling-based phantom gradient invokes implicit Jacobian regularization during training.

# 6.2 Robustness of DEQs under white-box attacks

Intriguingly, we discover the robustness accumulation effect in both the exact-trained and the unrolling-trained DEQs. We plot the highest robustness under the ready-made PGD-10 among all the intermediate states in Fig. 2-(a), with comparison to the final state robustness. It is shown that the intermediate states always exhibit much higher robustness. The accumulated robustness comes from gradient obfuscation, as the ready-made attacks fail to "directly" attack the intermediate states due to misaligned gradients. This resonates with the first challenge in Sec. 3, and similar results are observed as well in adversarially-trained neural ODEs: the large error tolerance from the ODE solvers with adaptive step sizes allows gradient masking after adversarial training [19].
Table 3: Performance (%) of the unrolling-trained DEQs under PGD-10/AutoAttack. The rows and the columns represent the same meanings as those in Table 2. Under the (underlined) strongest attacks, the ensemble defense achieves the best robustness performance (in bold).
<table><tr><td rowspan="2">Arch.</td><td rowspan="2">Defense</td><td rowspan="2">Clean</td><td colspan="3">Simultaneous Adjoint (PGD/AA)</td><td colspan="3">Unrolled Intermediates (PGD/AA)</td></tr><tr><td>Final</td><td>Intermediate</td><td>Ensemble</td><td>Final</td><td>Intermediate</td><td>Ensemble</td></tr><tr><td rowspan="3">Large</td><td>Final</td><td>78.03</td><td>49.81/51.48</td><td>59.49/61.95</td><td>54.91/52.95</td><td>42.67/37.27</td><td>62.24/65.53</td><td>51.52/49.66</td></tr><tr><td>Early</td><td>79.57</td><td>54.90/61.12</td><td>39.19/55.47</td><td>42.76/56.84</td><td>51.90/56.86</td><td>29.38/25.41</td><td>34.20/49.74</td></tr><tr><td>Ensemble</td><td>79.67</td><td>51.52/56.06</td><td>52.43/58.69</td><td>49.47/55.02</td><td>49.02/50.45</td><td>55.10/58.63</td><td>47.12/48.37</td></tr><tr><td rowspan="3">XL</td><td>Final</td><td>82.92</td><td>55.80/58.21</td><td>55.80/58.21</td><td>67.30/64.24</td><td>48.58/43.97</td><td>65.94/72.76</td><td>58.23/69.83</td></tr><tr><td>Early</td><td>80.12</td><td>51.40/58.92</td><td>51.40/58.92</td><td>60.78/62.98</td><td>52.08/56.58</td><td>55.70/62.67</td><td>48.88/62.87</td></tr><tr><td>Ensemble</td><td>81.17</td><td>52.87/58.08</td><td>52.87/58.08</td><td>61.40/62.23</td><td>51.70/54.09</td><td>59.71/66.90</td><td>53.23/56.45</td></tr></table>

The exact-trained DEQs, as discussed in Sec. 6.1, require larger iteration budgets in the solver. However, Fig. 2-(a) shows that the larger the iteration setting in the exact-trained DEQs, the more robust the intermediate states are under ready-made PGD-10. On the contrary, the (8/7) unrolling-trained DEQ still achieves the highest robustness at the final state. To benchmark the white-box robustness, in this section we compare the (50/50) exact-trained DEQ-Large with the (8/7) unrolling-trained one$^{3}$ . We integrate the estimated intermediate gradients in Sec. 4 as different alternatives in PGD-10 for white-box evaluation. Among all the attack candidates based on intermediate gradients, we select the one that leads to the largest robustness deterioration on the early-state defense in Sec. 5.2 and report the results in Table 2. Ablation studies on the performance of the attack candidates with gradients estimated at different intermediate states can be found in Sec. 6.4. We also report the memory usage of the defense strategies in Appendix F.1, and the running time complexity analysis of the white-box attacks in Appendix F.2.

Shown in Table 2, for the unrolling-trained DEQ, unrolling the intermediate states results in the strongest attack on the final-state and early-state defenses. While the robust accuracies under the final and intermediate attacks are improved and better balanced with the ensemble-state defense, the ensemble attack leads to the largest performance drop in this case, arriving at an overall white-box robustness of $47.12\%$ . The estimated intermediate gradients based on the simultaneous adjoint process also show significant attack performance for the exact-trained DEQ with large solver iterations. After maximizing the minimum robustness under all attacks across all defense techniques, the overall white-box robustness is $39.18\%$ . In addition, all attacks significantly deteriorate the robustness of the DEQs without adversarial training, indicating that the attacks leveraged in Table 2 are reliably strong (detailed in Sec. 6.5). Considering the superior robustness of the unrolling-trained DEQ as well as its training efficiency, we proceed with unrolling-trained DEQs for further evaluation.

# 6.3 Comparison between DEQs and deep networks

In this section, we further provide a thorough evaluation by benchmarking the white-box robustness performance of the unrolling-trained DEQ-Large and DEQ-XL under both PGD-10 and AutoAttack.
Table 3 shows the robustness performance of the unrolling-trained DEQ-Large and DEQ-XL. According to the results, the gradient ensemble attacks are more effective in defeating the early-state defense than the final-state defense. The ensemble attack is the most threatening on the ensemble defense in DEQ-Large. The attack with the gradient at the final state leads to the most substantial performance drop on the ensemble defense in DEQ-XL.
In Table 3, we find that the PGD-10 attack brings more significant performance drops than AutoAttack does in many settings. The results differ from the case in the robustness of deep networks [13]. The phenomenon originates from the difference between intermediate-state attacks and alternative defense strategies. AutoAttack will overfit to the provided gradients at the intermediate or the averaged states, thus generating less threatening adversaries on the defenses based on other states. In addition, the intermediate gradients can also be inaccurate, as they only serve as approximations.
Table 4: The comparison of robustness performance $(\%)$ between the DEQs with the ensemble defense and the deep networks of similar sizes. For the DEQs, the weakest robustness under all attacks in Table 3 is reported. For the deep networks, we report the results in [29].
<table><tr><td>Arch.</td><td>Clean</td><td>PGD-10</td><td>AA</td><td>#Params</td></tr><tr><td>ResNet-18</td><td>82.52</td><td>53.58</td><td>48.51</td><td>10M</td></tr><tr><td>DEQ-Large</td><td>79.67</td><td>47.12</td><td>48.37</td><td>10M</td></tr><tr><td>WRN-34-10</td><td>86.07</td><td>56.60</td><td>52.19</td><td>48M</td></tr><tr><td>DEQ-XL</td><td>81.17</td><td>51.70</td><td>54.09</td><td>48M</td></tr></table>

Figure 3: Ablation study on the gradient estimations at different intermediate states. (a) Robustness under PGD-10 with different intermediate adjoint states $\mathbf{u}_n$ as the surrogate gradient. For the approach in Sec. 4.1, $\mathbf{u}_4$ leads to the largest robustness drop at the early state $\mathbf{z}_3$ in the unrolling-trained DEQ. (b) and (c): Different unrolled intermediates $\mathbf{z}_n$ with different $k$ 's in Eq. (14); the $\lambda$ in Eq. (13) is set to 0.5 in (b) and 1 in (c). For the method in Sec. 4.2, unrolling the state $\mathbf{z}_1$ with $k = 1$ and $\lambda = 1$ results in the largest robustness drop at the early state $\mathbf{z}_3$ .

The minimum robustness under all types of attacks represents the robustness of a defense strategy. We thus take the most robust defenses for DEQ-Large and DEQ-XL, and compare them with the deep networks of similar parameter counts. Shown in Table 4, the empirical robustness of the DEQs is competitive with or even slightly higher than that of the ResNet-18 and WRN-34-10 models trained with the PGD-AT framework, respectively.

# 6.4 Ablation study on different intermediate gradients

In this section, we study the effect of the white-box attacks with gradients estimated at different intermediate states in the forward solver of the large-sized DEQs.
We first inspect the attacks with intermediate gradients acquired from each adjoint state. Fig. 3-(a) plots the robustness of the early-state and final-state defense in both the unrolling-trained and the exact-trained DEQs. For the exact-trained DEQ, due to its violated fixed-point structure, $\mathbf{u}_8$ results in the strongest attack for both the early-state and the final-state defenses. For the unrolling-trained DEQ, the estimated gradients at the consecutive adjoint states $\{\mathbf{u}_n\}$ form increasingly stronger attacks on the robustness of the final state. On the robustness at the early state $(\mathbf{z}_3)$ , the state $\mathbf{u}_4$ gives rise to the strongest attack, which coincides with Eq. (9) and Eq. (10) that $\mathbf{u}_{n + 1}$ directly depends on $\mathbf{z}_n$ .

We further explore whether the simultaneous adjoint process is aligned with the forward pass. We use each intermediate adjoint state $\mathbf{u}_n$ as the gradient surrogate in the PGD-10 attack for the unrolling-trained DEQ. Fig. 4 shows the robustness performance of all the intermediate states $\mathbf{z}_n$ in the forward pass of the unrolling-trained DEQ-Large. According to Fig. 4, it always holds that $\mathbf{u}_{n+1}$ results in the largest robustness drop of $\mathbf{z}_n$ . As $\mathbf{u}_{n+1}$ directly depends on $\mathbf{z}_n$ in Eq. (10), this validates that the simultaneous adjoint process is aligned with the forward pass at each iteration in terms of adversarial robustness.

Figure 4: Alignment between the simultaneous adjoint process and the forward pass in the unrolling-trained DEQ.

We also study the effect of unrolling different intermediate states $\{\mathbf{z}_n\}$ for surrogate gradient estimation. Fig. 3-(b)/(c) illustrates the robustness of the early state $\mathbf{z}_3$ in the unrolling-trained DEQ under white-box attacks in different settings. It is shown that the number of unrolling steps for intermediate states like $\mathbf{z}_1$ and $\mathbf{z}_2$ should not be too large in order to obtain a powerful intermediate attack. The reason might be the inaccuracy of the unrolled intermediate gradient estimates.

Table 5: The performance (%) of the standardly-trained DEQ-Large under the ready-made PGD-10 attack.
<table><tr><td>State</td><td>z1</td><td>z2</td><td>z3</td><td>z4</td><td>z5</td><td>z6</td><td>z7</td><td>z8</td></tr><tr><td>Clean Acc.</td><td>38.81</td><td>82.62</td><td>89.63</td><td>91.77</td><td>92.08</td><td>92.29</td><td>92.39</td><td>92.53</td></tr><tr><td>Robust Acc.</td><td>2.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td><td>0.00</td></tr></table>

Table 6: Performance (%) of the standardly-trained DEQ-Large [5] with all proposed adaptive attacks and defense strategies under the PGD attack. The notations for the rows and the columns are similar to those in Tables 2 and 3. Under the (underlined) strongest attacks, the ensemble defense still achieves the best robustness performance (in bold).
<table><tr><td rowspan="2">Defense</td><td rowspan="2">Clean</td><td colspan="3">Simultaneous Adjoint</td><td colspan="3">Unrolled Intermediates</td></tr><tr><td>Final</td><td>Intermediate</td><td>Ensemble</td><td>Final</td><td>Intermediate</td><td>Ensemble</td></tr><tr><td>Final</td><td>92.53</td><td>8.90</td><td>11.45</td><td>3.69</td><td>0.00</td><td>0.00</td><td>0.00</td></tr><tr><td>Early</td><td>38.81</td><td>6.08</td><td>4.54</td><td>3.42</td><td>2.00</td><td>2.94</td><td>1.31</td></tr><tr><td>Ensemble</td><td>87.31</td><td>9.12</td><td>6.39</td><td>3.48</td><td>0.00</td><td>0.00</td><td>0.00</td></tr></table>

The gradient estimated by unrolling $\mathbf{z}_1$ leads to the most vigorous attack on the robustness of $\mathbf{z}_3$ . To understand this, we note that the unrolling-based intermediate gradient reflects only the feedback from the loss function at the unrolled state in Eq. (13) and Eq. (14). As a result, the estimated gradient may be misaligned with the unrolled state: gradients obtained by unrolling $\mathbf{z}_3$ compose a weak attack in terms of the robustness of $\mathbf{z}_3$ . It is inferred that the perturbation from the unrolled intermediate gradients must still be propagated in the forward pass to induce a threatening enough distortion. This explains the delay with which the unrolled gradient at $\mathbf{z}_1$ affects the robustness of $\mathbf{z}_3$ . More ablation studies on the unrolled intermediates can be found in Appendix E.1.

# 6.5 Performance of the proposed attacks on vanilla DEQ models

In this section, we evaluate the performance of the proposed attacks on the DEQ models without adversarial training. We train a DEQ-Large on CIFAR-10 with standard training following the recipe in [5], and use the ready-made PGD-10 to attack the model. The clean accuracy of each state $\mathbf{z}_n$ , as well as its robust accuracy, is shown in Table 5. Different from the robustness accumulation effect in the adversarially-trained DEQs (shown in Sec. 6.2, Fig. 2-(a), and Appendix C), the ready-made PGD-10 already has a dramatic effect in attacking all the states in the standardly-trained DEQ.

We proceed to apply all the proposed attacks and defense strategies. Following Sec. 5.2, we determine the optimal timing for early exiting the standardly-trained DEQ as state $\mathbf{z}_1$ . Shown in Table 6, all the proposed attacks defeat the standardly-trained DEQ. As the white-box robustness of DEQs is assessed by the strongest defense under all attacks (minimum over all columns in a row, then maximum of these minima over the rows), the white-box robustness of the vanilla DEQ is $1.31\%$ with a $38.81\%$ clean accuracy using the early-state defense. When using the final-state and the ensemble-state defenses, the robustness is $0\%$ . These results validate that all the proposed attacks are reliably strong, as they all defeat the DEQ models without adversarial training.
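
The max-min evaluation protocol can be stated compactly; the sketch below only illustrates the reduction over an attack-by-defense table, and the numbers in the comment are made up.

```python
def whitebox_robustness(table):
    """Aggregate a {defense: {attack: robust_acc}} table as described above:
    take the minimum over attacks per defense, then the maximum over defenses."""
    worst_case = {d: min(accs.values()) for d, accs in table.items()}
    best_defense = max(worst_case, key=worst_case.get)
    return best_defense, worst_case[best_defense]

# Hypothetical example:
# whitebox_robustness({"final": {"a": 8.9, "b": 0.0},
#                      "early": {"a": 6.1, "b": 1.3}})  ->  ("early", 1.3)
```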
|
| 309 |
+
|
| 310 |
+
# 7 Conclusion
|
| 311 |
+
|
| 312 |
+
We study the adversarial robustness of general DEQs, adversarially training them with either the exact gradient or the unrolling-based phantom gradient. We observe gradient obfuscation issues in DEQs under ready-made attacks. Based on the misalignment between the forward and backward tracks, we leverage intermediate states in the forward pass to construct white-box attacks and defense strategies and benchmark the white-box robustness performance of DEQs.
|
| 313 |
+
|
| 314 |
+
While we have performed a careful comparison of white-box robustness between DEQs and deep networks, the performance of DEQs is only on par with that of deep networks. Our empirical observations indicate that more advanced AT mechanisms should be explored for DEQs in order to exploit their local attractor structures. A potential way is to explicitly encourage closed-loop control during training, similar to the mechanism introduced in [11]. To this end, the gradient estimation method proposed in this paper would be one of the critical ingredients for resolving the misalignment between the forward and backward passes of DEQs.
|
| 315 |
+
|
| 316 |
+
# Acknowledgements
|
| 317 |
+
|
| 318 |
+
This work was supported by the National Natural Science Foundation of China (No. 61925601) and the Beijing Academy of Artificial Intelligence (BAAI). We thank all of the anonymous reviewers for their comments and suggestions on this work.
|
| 319 |
+
|
| 320 |
+
# References
|
| 321 |
+
|
| 322 |
+
[1] Maksym Andriushchenko, Francesco Croce, Nicolas Flammarion, and Matthias Hein. Square attack: a query-efficient black-box adversarial attack via random search. In European Conference on Computer Vision (ECCV), pages 484-501. Springer, 2020.
|
| 323 |
+
[2] Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International Conference on Machine Learning (ICML), 2018.
|
| 324 |
+
[3] Shaojie Bai, J. Zico Kolter, and Vladlen Koltun. Deep Equilibrium Models. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence d'Alché-Buc, Emily B. Fox, and Roman Garnett, editors, Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pages 688-699, 2019.
|
| 325 |
+
[4] Shaojie Bai, Vladlen Koltun, and J. Zico Kolter. Multiscale Deep Equilibrium Models. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020.
|
| 326 |
+
[5] Shaojie Bai, Vladlen Koltun, and J. Zico Kolter. Stabilizing Equilibrium Models by Jacobian Regularization. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 554-565. PMLR, 2021.
|
| 327 |
+
[6] Battista Biggio, Igino Corona, Davide Maiorca, Blaine Nelson, Nedim Šrndić, Pavel Laskov, Giorgio Giacinto, and Fabio Roli. Evasion attacks against machine learning at test time. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 387-402. Springer, 2013.
|
| 328 |
+
[7] C. G. Broyden. A Class of Methods for Solving Nonlinear Simultaneous Equations. Mathematics of Computation, 19:577-593, 1965.
|
| 329 |
+
[8] Nicholas Carlini, Anish Athalye, Nicolas Papernot, Wieland Brendel, Jonas Rauber, Dimitris Tsipras, Ian Goodfellow, Aleksander Madry, and Alexey Kurakin. On evaluating adversarial robustness. arXiv preprint arXiv:1902.06705, 2019.
|
| 330 |
+
[9] Ricky T Q Chen, Yulia Rubanova, Jesse Bettencourt, and David Duvenaud. Neural Ordinary Differential Equations. In NeurIPS, 2018.
|
| 331 |
+
[10] Tong Chen, Jean B. Lasserre, Victor Magron, and Edouard Pauwels. Semialgebraic Representation of Monotone Deep Equilibrium Models and Applications to Certification. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021.
|
| 332 |
+
[11] Zhuotong Chen, Qianxiao Li, and Zheng Zhang. Towards robust neural networks via close-loop control. In International Conference on Learning Representations, 2021.
|
| 333 |
+
[12] Francesco Croce, Maksym Andriushchenko, Vikash Sehwag, Edoardo Debenedetti, Nicolas Flammarion, Mung Chiang, Prateek Mittal, and Matthias Hein. RobustBench: a standardized adversarial robustness benchmark. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 2), 2021.
|
| 334 |
+
[13] Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International Conference on Machine Learning (ICML), 2020.
|
| 335 |
+
[14] Zhengyang Geng, Xin-Yu Zhang, Shaojie Bai, Yisen Wang, and Zhouchen Lin. On Training Implicit Models. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021.
|
| 336 |
+
[15] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In International Conference on Learning Representations (ICLR), 2015.
|
| 337 |
+
|
| 338 |
+
[16] Swaminathan Gurumurthy, Shaojie Bai, Zachary Manchester, and J Zico Kolter. Joint inference and input optimization in equilibrium networks. In A. Beygelzimer, Y. Dauphin, P. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, 2021.
|
| 339 |
+
[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016.
|
| 340 |
+
[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 770-778, 2016.
|
| 341 |
+
[19] Yifei Huang, Yaodong Yu, Hongyang Zhang, Yi Ma, and Yuan Yao. Adversarial Robustness of Stabilized NeuralODEs Might be from Obfuscated Gradients. CoRR, abs/2009.13145, 2020.
|
| 342 |
+
[20] Saber Jafarpour, Matthew Abate, Alexander Davydov, Francesco Bullo, and Samuel Coogan. Robustness Certificates for Implicit Neural Networks: A Mixed Monotone Contractive Approach. CoRR, abs/2112.05310, 2021.
|
| 343 |
+
[21] Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.
|
| 344 |
+
[22] Alexey Kurakin, Ian Goodfellow, and Samy Bengio. Adversarial machine learning at scale. In International Conference on Learning Representations (ICLR), 2017.
|
| 345 |
+
[23] Guohao Li, Matthias Müller, Bernard Ghanem, and Vladlen Koltun. Training Graph Neural Networks with 1000 Layers. In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 6437-6449. PMLR, 2021.
|
| 346 |
+
[24] Cheng Lu, Jianfei Chen, Chongxuan Li, Qiuhao Wang, and Jun Zhu. Implicit Normalizing Flows. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021.
|
| 347 |
+
[25] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations (ICLR), 2018.
|
| 348 |
+
[26] Seyed-Mohsen Moosavi-Dezfooli, Alhussein Fawzi, and Pascal Frossard. Deepfool: a simple and accurate method to fool deep neural networks. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2574–2582, 2016.
|
| 349 |
+
[27] Mark Niklas Müller, Robin Staab, Marc Fischer, and Martin T. Vechev. Effective Certification of Monotone Deep Equilibrium Models. CoRR, abs/2110.08260, 2021.
|
| 350 |
+
[28] Chirag Pabbaraju, Ezra Winston, and J. Zico Kolter. Estimating Lipschitz constants of monotone deep equilibrium models. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. OpenReview.net, 2021.
|
| 351 |
+
[29] Tianyu Pang, Xiao Yang, Yinpeng Dong, Hang Su, and Jun Zhu. Bag of tricks for adversarial training. In International Conference on Learning Representations (ICLR), 2021.
|
| 352 |
+
[30] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS), pages 8024-8035, 2019.
|
| 353 |
+
[31] Lev Semenovich Pontryagin, EF Mishchenko, VG Boltyanskii, and RV Gamkrelidze. The mathematical theory of optimal processes, 1962.
|
| 354 |
+
[32] Aditi Raghunathan, Sang Michael Xie, Fanny Yang, John Duchi, and Percy Liang. Understanding and mitigating the tradeoff between robustness and accuracy. In International Conference on Machine Learning (ICML), 2020.
|
| 355 |
+
|
| 356 |
+
[33] Zaccharie Ramzi, Florian Mannel, Shaojie Bai, Jean-Luc Starck, Philippe Ciuciu, and Thomas Moreau. SHINE: SHaring the INverse estimate from the forward pass for bi-level optimization and implicit models. In International Conference on Learning Representations, 2022.
|
| 357 |
+
[34] Max Revay, Ruigang Wang, and Ian R. Manchester. Lipschitz Bounded Equilibrium Networks. CoRR, abs/2010.01732, 2020.
|
| 358 |
+
[35] Leslie Rice, Eric Wong, and J Zico Kolter. Overfitting in adversarially robust deep learning. In International Conference on Machine Learning (ICML), 2020.
|
| 359 |
+
[36] David E. Rumelhart, Geoffrey E. Hinton, and Ronald J. Williams. Learning Representations by Back-propagating Errors. Nature, 323(6088):533-536, 1986.
|
| 360 |
+
[37] Dong Su, Huan Zhang, Hongge Chen, Jinfeng Yi, Pin-Yu Chen, and Yupeng Gao. Is robustness the cost of accuracy? – a comprehensive study on the robustness of 18 deep image classification models. In The European Conference on Computer Vision (ECCV), 2018.
|
| 361 |
+
[38] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations (ICLR), 2014.
|
| 362 |
+
[39] Dimitris Tsipras, Shibani Santurkar, Logan Engstrom, Alexander Turner, and Aleksander Madry. Robustness may be at odds with accuracy. In International Conference on Learning Representations (ICLR), 2019.
|
| 363 |
+
[40] Ezra Winston and J. Zico Kolter. Monotone operator equilibrium networks. In Hugo Larochelle, Marc'Aurelio Ranzato, Raia Hadsell, Maria-Florina Balcan, and Hsuan-Tien Lin, editors, Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual, 2020.
|
| 364 |
+
[41] Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. In The British Machine Vision Conference (BMVC), 2016.
|
| 365 |
+
[42] Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric P Xing, Laurent El Ghaoui, and Michael I Jordan. Theoretically principled trade-off between robustness and accuracy. In International Conference on Machine Learning (ICML), 2019.
|
| 366 |
+
|
| 367 |
+
# Checklist
|
| 368 |
+
|
| 369 |
+
1. For all authors...
|
| 370 |
+
|
| 371 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 372 |
+
(b) Did you describe the limitations of your work? [Yes] We describe future work in the Conclusion section and include analysis and discussion of the limitations of our work in the Appendices.
|
| 373 |
+
(c) Did you discuss any potential negative societal impacts of your work? [Yes] We discuss them in the Broad Impact section.
|
| 374 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 375 |
+
|
| 376 |
+
2. If you are including theoretical results...
|
| 377 |
+
|
| 378 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes]
|
| 379 |
+
(b) Did you include complete proofs of all theoretical results? [Yes]
|
| 380 |
+
|
| 381 |
+
3. If you ran experiments...
|
| 382 |
+
|
| 383 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
|
| 384 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
|
| 385 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [No]
|
| 386 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes]
|
| 387 |
+
|
| 388 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 389 |
+
|
| 390 |
+
(a) If your work uses existing assets, did you cite the creators? [Yes]
|
| 391 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 392 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
|
| 393 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 394 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 395 |
+
|
| 396 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 397 |
+
|
| 398 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 399 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 400 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
acloserlookattheadversarialrobustnessofdeepequilibriummodels/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c0704f0dc71414adb92fae8998b8cbe4a2a9d30970f2ab3a3a6749b2005c4252
|
| 3 |
+
size 455945
|
acloserlookattheadversarialrobustnessofdeepequilibriummodels/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:88bb5930c0e0787340c9681303ed4a2e62ab7c1f6a98267c62ce64a0f4ff154a
|
| 3 |
+
size 487400
|
acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b5f7878a149cc45d00cc879e418f175e4813bb932836708712cffd7a43048f2e
|
| 3 |
+
size 73117
|
acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:71c3162813adb076db3c740699a5e69a6651bd34a6cf8b4e018e4090e0c33bad
|
| 3 |
+
size 91328
|
acombinatorialperspectiveontheoptimizationofshallowrelunetworks/423a6c06-21c0-4880-ac32-ea9b8a7ddda0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a4bf8523d3eea48ddd1b12c73ac9a35e606ab2145ef9120d38ff95065e4177d5
|
| 3 |
+
size 470059
|
acombinatorialperspectiveontheoptimizationofshallowrelunetworks/full.md
ADDED
|
@@ -0,0 +1,310 @@
| 1 |
+
# A Combinatorial Perspective on the Optimization of Shallow ReLU Networks
|
| 2 |
+
|
| 3 |
+
Michael Matena
|
| 4 |
+
|
| 5 |
+
Department of Computer Science
|
| 6 |
+
|
| 7 |
+
University of North Carolina at Chapel Hill
|
| 8 |
+
|
| 9 |
+
Chapel Hill, NC 27599
|
| 10 |
+
|
| 11 |
+
mmatena@cs.unc.edu
|
| 12 |
+
|
| 13 |
+
Colin Raffel
|
| 14 |
+
|
| 15 |
+
Department of Computer Science
|
| 16 |
+
|
| 17 |
+
University of North Carolina at Chapel Hill
|
| 18 |
+
|
| 19 |
+
Chapel Hill, NC 27599
|
| 20 |
+
|
| 21 |
+
craffel@cs.unc.edu
|
| 22 |
+
|
| 23 |
+
# Abstract
|
| 24 |
+
|
| 25 |
+
The NP-hard problem of optimizing a shallow ReLU network can be characterized as a combinatorial search over each training example's activation pattern followed by a constrained convex problem given a fixed set of activation patterns. We explore the implications of this combinatorial aspect of ReLU optimization in this work. We show that it can be naturally modeled via a geometric and combinatoric object known as a zonotope with its vertex set isomorphic to the set of feasible activation patterns. This assists in analysis and provides a foundation for further research. We demonstrate its usefulness when we explore the sensitivity of the optimal loss to perturbations of the training data. Later we discuss methods of zonotope vertex selection and its relevance to optimization. Overparameterization assists in training by making a randomly chosen vertex more likely to contain a good solution. We then introduce a novel polynomial-time vertex selection procedure that provably picks a vertex containing the global optimum using only double the minimum number of parameters required to fit the data. We further introduce a local greedy search heuristic over zonotope vertices and demonstrate that it outperforms gradient descent on underparameterized problems.
|
| 26 |
+
|
| 27 |
+
# 1 Introduction
|
| 28 |
+
|
| 29 |
+
Neural networks have become commonplace in a variety of applications. They are typically trained to minimize a loss on a given dataset of labeled examples using a variant of stochastic gradient descent. However, our theoretical knowledge of neural networks and their training lags behind their practical developments.
|
| 30 |
+
|
| 31 |
+
Single-layer ReLU networks are an appealing subject for theoretical study. The universal approximation theorem guarantees their expressive power while their relative simplicity makes analysis tractable (Hornik, 1991). We restrict ourselves in this paper to studying empirical risk minimization (ERM) as was done in previous works (Du et al., 2018; Oymak & Soltanolkotabi, 2020), which is justified since the train set performance tends to upper bound the test set performance. Furthermore, modern neural networks achieve zero training loss but nevertheless generalize well (Kaplan et al., 2020; Nakkiran et al., 2021). Minimizing the training loss of a shallow ReLU network is a nonconvex optimization problem. Finding its global minima is difficult and can in fact be shown to be NP-hard in general (Goel et al., 2020). Arora et al. (2016) provide an explicit algorithm for finding the global minima by solving a set of convex optimization problems; however, the size of this set is exponential in both the input dimension $d$ and the number of hidden units $m$ .
|
| 32 |
+
|
| 33 |
+
In this paper, we explore the combinatorial structure implicit in the global optimization algorithm of Arora et al. (2016). We start by using tools from polyhedral geometry to characterize the set of convex optimization problems and describe the relationships between the subproblems. Notably, we are
|
| 34 |
+
|
| 35 |
+
able to create a special type of polytope called a zonotope (McMullen, 1971) whose vertices have a one-to-one correspondence with the convex subproblems and whose faces represent information about their relationships. We then explore the combinatorial optimization problem implicit in shallow ReLU network empirical risk minimization using the zonotope formalism to help interpret our findings and assist in some proofs.
|
| 36 |
+
|
| 37 |
+
Since the computational complexity of optimization problems shapes our approach to solving them, we examine the reductions of NP-hard problems introduced in Goel et al. (2020). The datasets produced have examples that are not in general position (i.e. they have nontrivial affine dependencies), which differs from most real-world datasets. We prove that the global optimum of the loss of a shallow ReLU network over such a dataset can have a discontinuous jump for arbitrarily small perturbations of the data, which has a very natural interpretation in our zonotope formalism. This means that a proof of the NP-hardness of ReLU optimization given training examples in general position does not follow from the results of Goel et al. (2020) via a simple continuity argument. We therefore present a modification of their proof that uses a dataset in general position.
|
| 38 |
+
|
| 39 |
+
In contrast to the NP-hardness of general ReLU optimization, sufficient overparameterization allows gradient descent to provably converge to a global optimum in polynomial time, as demonstrated by (Du et al., 2018; Oymak & Soltanolkotabi, 2020). We interpret the proof methods generally used in these works as asserting that sufficient overparameterization allows gradient descent to bypass much of the combinatorial search over zonotope vertices by having a randomly chosen vertex be close to one with a good solution with high probability. We then introduce a novel algorithm that finds a good zonotope vertex in polynomial time requiring only about twice the minimum number of hidden units required to fit the dataset.
|
| 40 |
+
|
| 41 |
+
Finally, we explore how gradient descent interacts with this combinatorial structure. We provide empirical evidence that it can perform some aspects of combinatorial search but present an informal argument that it is suboptimal. We reinforce this claim by showing that a greedy local search heuristic over the vertices of the zonotope outperforms gradient descent on some toy synthetic problems and simplifications of real-world tasks. In contrast to the NP-hard worst case, these results suggest that the combinatorial searches encountered in practice might be relatively tractable.
|
| 42 |
+
|
| 43 |
+
We summarize our contributions as follows.
|
| 44 |
+
|
| 45 |
+
- We are the first to provide an in-depth exposition of the combinatorial structure arising from the set of feasible activation patterns that is implicit in shallow ReLU network optimization. In particular, we show that this structure can be characterized exactly as a Cartesian power of the zonotope generated by the set of training examples.
|
| 46 |
+
- We use this formalism to prove necessary conditions for the global optimum of a shallow ReLU network to be discontinuous with respect to the training dataset. We show that this implies that previous NP-hardness proofs of ReLU optimization do not automatically apply to datasets satisfying realistic assumptions, which we rectify by presenting a modification that uses a dataset in general position.
|
| 47 |
+
- We explore the role that combinatorial considerations play in the relationship between overparameterization and optimization difficulty. In particular, we introduce a novel polynomial-time algorithm fitting a generic dataset using twice the minimum number of parameters needed.
|
| 48 |
+
- We introduce a novel heuristic algorithm that performs a greedy search along edges of a zonotope and show that it outperforms gradient descent on some toy datasets.
|
| 49 |
+
|
| 50 |
+
We hope that the tools we introduce are generally useful in furthering our understanding of ReLU networks. Notably, they have deep connections to several well-established areas of mathematics (McMullen, 1971; Richter-Gebert & Ziegler, 2017; Ziegler, 2012), which might allow researchers to quickly make new insights by drawing upon existing results in those fields.
|
| 51 |
+
|
| 52 |
+
# 2 Empirical Risk Minimization for ReLU Networks
|
| 53 |
+
|
| 54 |
+
A single ReLU layer consists of an affine transformation followed by a coordinate-wise application of the ReLU nonlinearity $\phi(x) = \max\{x, 0\}$ . We can represent an affine transformation from $\mathbb{R}^d \to \mathbb{R}^m$ by an $m \times (d + 1)$ matrix by representing its inputs in homogeneous coordinates, i.e. by appending a $(d + 1)$ -th coordinate to network inputs that is always equal to 1. Hence a single ReLU layer with parameters $W$ can be written as $f_W(\mathbf{x}) = \phi(W\overline{\mathbf{x}})$ , where $\overline{\mathbf{x}}$ denotes $\mathbf{x}$ expressed
|
| 55 |
+
|
| 56 |
+
in homogeneous coordinates. A single hidden layer ReLU network consists of a single ReLU layer followed by an affine transformation. We focus on the case of a network with scalar output, so the second layer can be represented by a vector $\mathbf{v} \in \mathbb{R}^{m+1}$ . Although the second layer parameters are trained jointly with the first layer in practice, we assume that they are fixed for our analysis. This parallels simplifying assumptions made in previous work (Du et al., 2018).
|
| 57 |
+
|
| 58 |
+
Let $\ell : \mathbb{R} \times \mathbb{R} \to \mathbb{R}$ be a convex loss function such as MSE or cross-entropy. Since the second layer parameters are fixed, we can incorporate them into a modified loss function $\tilde{\ell} : \mathbb{R}^m \times \mathbb{R} \to \mathbb{R}$ given by $\tilde{\ell}(\mathbf{z}, y) = \ell(\mathbf{v}^T\bar{\mathbf{z}}, y)$ that operates directly on the first layer's activations $\mathbf{z}$ . This modified loss function is convex since it is the composition of a convex function with an affine function.
|
| 59 |
+
|
| 60 |
+
Suppose we are given $\mathcal{D} = \{(\mathbf{x}_i, y_i)\}_{i=1}^N$ as the training dataset with $\mathbf{x}_i \in \mathbb{R}^d$ and $y_i \in \mathbb{R}$ for all $i = 1, \ldots, N$ . Throughout this paper, we assume that $N > d + 1$ . Sometimes we will represent a dataset by a matrix $X \in \mathbb{R}^{(d+1) \times N}$ with each column corresponding to an example and its labels as the vector $\mathbf{y} \in \mathbb{R}^N$ . We say that $\mathcal{D}$ is in general position if there exist no nontrivial affine dependencies between the columns of $X$ . The empirical loss $L(W)$ , also known as the empirical risk, is defined as the mean per-example loss
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
L(W) = \frac{1}{N} \sum_{i=1}^{N} \tilde{\ell}\left(f_{W}(\mathbf{x}_{i}), y_{i}\right). \tag{1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
The goal of ERM is to find a set of parameters $W$ that minimizes this loss. Arora et al. (2016) were the first to introduce an algorithm for exact ERM. We adapt their algorithm for the case of fixed second layer weights in algorithm 1, which has a running time of $O(N^{md}\mathrm{poly}(N,m,d))$ . The algorithm works by iterating over all feasible activation patterns
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\mathcal{A} = \left\{\mathbb{I}\{W\bar{X} > 0\} \in \{0, 1\}^{m \times N} \mid W \in \mathbb{R}^{m \times (d+1)} \right\}. \tag{2}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
The subset of parameters corresponding to an activation pattern, which we call an activation region, can be expressed via a set of linear inequalities. Within a single activation region, the map from parameter values to ReLU layer activations over the training dataset is linear. Hence we can solve a constrained convex optimization problem to get the optimal parameters in each activation region. Namely for a given $A \in \mathcal{A}$ , we solve for $W \in \mathbb{R}^{m \times (d + 1)}$ in the following
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
\text{minimize} \quad \frac{1}{N} \sum_{i=1}^{N} \tilde{\ell}\left(\mathbf{a}_{i} \odot \left(W\bar{\mathbf{x}}_{i}\right), y_{i}\right) \tag{3}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\text{subject to} \quad (2\mathbf{a}_{i} - 1) \odot (W\bar{\mathbf{x}}_{i}) \geq 0
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $\odot$ denotes the Hadamard product and $\mathbf{a}_i$ denotes the $i$ -th column of $A$ . The global optimum then becomes the best optimum found over the entire set of activation regions. Thus single-layer ReLU network ERM can be expressed as a combinatorial search over activation patterns with a convex optimization step per pattern.
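As a concrete illustration of the inner convex step, the following sketch solves an instance of problem (3) for one fixed activation pattern with a squared loss using cvxpy. The random data, the frozen second layer $\mathbf{v}$, and the candidate pattern $A$ are placeholder assumptions; in algorithm 1, $A$ would instead range over the feasible patterns $\mathcal{A}$.

```python
import cvxpy as cp
import numpy as np

rng = np.random.default_rng(0)
d, m, N = 3, 4, 20
X = rng.standard_normal((d, N))
X_bar = np.vstack([X, np.ones((1, N))])          # homogeneous coordinates, shape (d+1, N)
y = rng.standard_normal(N)
v = rng.standard_normal(m + 1)                   # fixed second layer (last entry acts as bias)
A = rng.integers(0, 2, size=(m, N))              # one candidate activation pattern

W = cp.Variable((m, d + 1))
Z = cp.multiply(A, W @ X_bar)                    # masked pre-activations a_i ⊙ (W x̄_i)
preds = v[:m] @ Z + v[m]                         # fixed second layer applied to the activations
objective = cp.Minimize(cp.sum_squares(preds - y) / N)
constraints = [cp.multiply(2 * A - 1, W @ X_bar) >= 0]   # sign constraints defining the region
prob = cp.Problem(objective, constraints)
prob.solve()
print("optimal loss in this activation region:", prob.value)
```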
|
| 83 |
+
|
| 84 |
+
# 3 Zonotope Formalism
|
| 85 |
+
|
| 86 |
+
While Arora et al. (2016) mention that $\mathcal{A}$ arises from a set of hyperplanes induced by the training examples, they only use this connection to bound its cardinality $|\mathcal{A}| = O(N^{md})$ . However, hyperplane arrangements are well-studied geometric and combinatoric objects (Richter-Gebert & Ziegler, 2017; Stanley et al., 2004). As such, we will see that we can use this connection to better characterize the combinatorial aspects of ReLU optimization.
|
| 87 |
+
|
| 88 |
+
Our mathematical tools for describing the combinatorial structure of shallow ReLU network optimization include oriented hyperplane arrangements, polyhedral sets, polyhedral complexes, and zonotopes. Appendix A provides an approachable overview of these topics for unfamiliar readers.
|
| 89 |
+
|
| 90 |
+
Single Hidden Unit We start by considering a single ReLU unit $f_{\mathbf{w}}(\mathbf{x}) = \phi (\mathbf{w}^T\bar{\mathbf{x}})$ parameterized by the vector $\mathbf{w}\in \mathbb{R}^{d + 1}$ . Looking at its behavior as $\mathbf{w}$ ranges over $\mathbb{R}^{d + 1}$ on a single training example $\mathbf{x}_i$ , we see that there are two linear regimes depending on the sign of $\mathbf{w}^T\bar{\mathbf{x}}$ . They are separated by the hyperplane in parameter space satisfying $\mathbf{w}^T\bar{\mathbf{x}} = 0$ . We can describe such behavior mathematically as an oriented hyperplane with the sign of $\mathbf{w}^T\bar{\mathbf{x}}$ providing its orientation. The
|
| 91 |
+
|
| 92 |
+

|
| 93 |
+
Figure 1: Left: A set of 5 training examples in $\mathbb{R}^2$ . Center: A two-dimensional slice of parameter space $\mathbb{R}^3$ along the $w_{3} = 1$ plane reflecting the polyhedral complex $\mathcal{R}$ . The lines correspond to the set of hyperplanes $H_1^0, \ldots, H_5^0$ . The different shaded chambers correspond to different activation regions. Each chamber can be thought of as the base of cone whose apex is the origin. Right: The zonotope $\mathcal{Z}$ for this dataset. The corresponding activation region for a vertex is indicated by the colored circles. Note how the edges and faces of $\mathcal{Z}$ capture the incidence structure of the activation regions. Each of the red lines is a translation of a (homogenized) training example. Exactly these 3 training examples are active in the yellow activation region.
|
| 94 |
+
|
| 95 |
+

|
| 96 |
+
|
| 97 |
+

|
| 98 |
+
|
| 99 |
+
collection of oriented hyperplanes associated to each training example $\{\bar{\mathbf{x}}_i\}_{i = 1}^N$ is known as an oriented hyperplane arrangement (Richter-Gebert & Ziegler, 2017).
|
| 100 |
+
|
| 101 |
+
Algorithm 1 Exact ERM (Arora et al., 2016)
|
| 102 |
+
Input: data $\mathcal{D} = \{\mathbf{x}_i, y_i\}_{i = 1}^N$ , 2nd layer $\mathbf{v}\in \mathbb{R}^{m + 1}$
$\mathcal{A}\subseteq \{0,1\}^{m\times N}$ {feasible activation patterns (2)}
|
| 103 |
+
$W^{*}\in \mathbb{R}^{m\times (d + 1)}$ {random initialization}
|
| 104 |
+
for $A\in \mathcal{A}$ do
|
| 105 |
+
$W\gets$ solution of (3)
|
| 106 |
+
if $L(W) < L(W^{*})$ then
|
| 107 |
+
$W^{*}\gets W$
|
| 108 |
+
end if
|
| 109 |
+
end for
|
| 110 |
+
return $W^{*}$
|
| 111 |
+
|
| 112 |
+
The structure imposed on parameter space $\mathbb{R}^{d + 1}$ by this oriented hyperplane arrangement can be described as a polyhedral complex (Ziegler, 2012), which is a collection of polyhedral sets and their faces that fit together in a "nice" way. The polyhedral complex $\mathcal{R}$ induced by the training set will contain codimension 0 sets called chambers. These correspond exactly to activation regions. Activation patterns have a one-to-one correspondence with the tuple of hyperplane orientations associated to each chamber. The center panel of fig. 1 provides an illustration of $\mathcal{R}$ for an example dataset.
|
| 113 |
+
|
| 114 |
+
The dual zonotope of a polyhedral complex
|
| 115 |
+
|
| 116 |
+
is a single polytope providing an alternate representation of its combinatorial structure (Ziegler, 2012). Each dimension $k$ member of the polyhedral complex has a corresponding codimension $k$ face in the dual zonotope. Incidence relations between the members of the complex are preserved in the dual zonotope. Generally, a zonotope can be described as the image of an $N$ -dimensional hypercube under a linear map whose columns are known as its generators (McMullen, 1971). Each vertex of the zonotope thus is a weighted sum of its generators with coefficients belonging to $\{0,1\}$ .
|
| 117 |
+
|
| 118 |
+
The dual zonotope $\mathcal{Z}$ of our polyhedral complex $\mathcal{R}$ has the training examples $\{\bar{\mathbf{x}}_i\}_{i=1}^N$ as its generators. The vertices of $\mathcal{Z}$ have a one-to-one correspondence to the activation regions of our network. When a vertex is expressed as a weighted sum over the generators, the coefficient $\{0,1\}$ of each generator equals its corresponding example's value in the region's activation pattern. The right panel of fig. 1 shows an example zonotope $\mathcal{Z}$ and its duality with the polyhedral complex $\mathcal{R}$ .
|
| 119 |
+
|
| 120 |
+
These correspondences allow us to assign additional structure to the set of activation patterns $\mathcal{A}$ rather than just treating it as an unstructured set. For example, the 1-skeleton of the zonotope $\mathcal{Z}$ , which is the graph formed by its vertices and edges, provides a means of traversing the set of activation patterns. Furthermore, we can directly make connections between the training dataset and the activation pattern structure by making use of the fact that $\mathcal{Z}$ is generated by the training examples.
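This correspondence can be made tangible on a toy example. The sketch below (an illustration we add here, not the paper's code) samples many random weight vectors for a single unit and collects the distinct sign patterns $\mathbb{I}\{\mathbf{w}^T\bar{\mathbf{x}}_i > 0\}$; each distinct pattern found is the coefficient vector of a vertex of the zonotope $\mathcal{Z}$ generated by the homogenized training examples (dense random sampling may still miss very small chambers).

```python
import numpy as np

rng = np.random.default_rng(0)
d, N = 2, 5
X = rng.standard_normal((d, N))
X_bar = np.vstack([X, np.ones((1, N))])      # generators of the zonotope Z, shape (d+1, N)

patterns = set()
for _ in range(200_000):                     # dense random sampling of parameter space
    w = rng.standard_normal(d + 1)
    patterns.add(tuple((w @ X_bar > 0).astype(int)))

print(f"{len(patterns)} feasible activation patterns (= vertices of Z) found")
# Each pattern records which generators enter with coefficient 1 at that vertex.
```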
|
| 121 |
+
|
| 122 |
+
Multiple Hidden Units In the multiple hidden unit setting, i.e. $m > 1$ , note that parameter space becomes an $m$ -fold Cartesian product of single-unit parameter spaces. Furthermore, we are free to set the activation pattern for each unit independently of the others. As the combinatorial structure for
|
| 123 |
+
|
| 124 |
+
each hidden unit can be described using the zonotope $\mathcal{Z}$ , the combinatorial structure for a multiple hidden unit network is described by the $m$ -fold Cartesian product $\mathcal{Z}^m = \prod_{i=1}^{m} \mathcal{Z}$ . As noted in appendix A.3.1, $\mathcal{Z}^m$ is also a zonotope. Each vertex of $\mathcal{Z}^m$ corresponds to a product of $m$ vertices of $\mathcal{Z}$ . As in the single unit case, there is a one-to-one correspondence between the vertices of $\mathcal{Z}^m$ and the set of activation patterns $\mathcal{A}$ .
|
| 125 |
+
|
| 126 |
+
# 4 ReLU Optimization
|
| 127 |
+
|
| 128 |
+
# 4.1 NP-Hardness
|
| 129 |
+
|
| 130 |
+
Given the additional structure we have imposed on the set of activation patterns in algorithm 1, it is natural to ask whether we can use it to develop a global optimization algorithm that is more efficient than a brute-force search over activation patterns. Unfortunately, several works (Goel et al., 2020; Froese et al., 2021) have demonstrated that global optimization of a shallow ReLU network is NP-hard. Nevertheless, this does not preclude the existence of an efficient combinatorial optimizer given certain conditions on the input dataset. Since the zonotope $\mathcal{Z}^m$ encapsulates the combinatorial structure of the optimization problem, we look to see if properties of $\mathcal{Z}^m$ can be related to the difficulty of combinatorial optimization.
|
| 131 |
+
|
| 132 |
+
Nontrivial affine dependencies between training examples influence the combinatorial structure of $\mathcal{Z}^m$. Since the reductions of NP-hard problems to ReLU optimization in Goel et al. (2020) and Froese et al. (2021) create datasets with such nontrivial dependencies, it is natural to ask whether it is NP-hard to optimize a shallow ReLU network over a training dataset in general position.
|
| 133 |
+
|
| 134 |
+
# 4.1.1 Discontinuity of the Global Optimum
|
| 135 |
+
|
| 136 |
+
If the global minimum of the loss is always continuous with respect to the input dataset, then the NP-hardness of optimization over arbitrary datasets in general position would follow from continuity since every set of points is arbitrarily close to a set in general position. However, we can prove that such continuity holds unconditionally for ReLU optimization only in the case where the training dataset is in general position. We give a sketch of the proof here along with some analysis of the failure cases that can happen when the data are not in general position. We provide a full proof in appendix C.
|
| 137 |
+
|
| 138 |
+
Theorem 4.1. Suppose we are given a dataset $\mathcal{D} = \{(\mathbf{x}_i, y_i)\}_{i=1}^N$ in general position and some $m \in \mathbb{N}$ . Let $L^{*}(\mathcal{D})$ denote the global minimum of the loss (1) over the dataset $\mathcal{D}$ for a shallow ReLU network with $m$ units. Given any $\epsilon > 0$ , some $\delta > 0$ exists such that $|L^{*}(\mathcal{D}) - L^{*}(\mathcal{D}_{\epsilon})| < \delta$ for any dataset $\mathcal{D}_{\epsilon} = \{(\mathbf{x}_i', y_i)\}_{i=1}^N$ satisfying $\|\mathbf{x}_i - \mathbf{x}_i'\|_2 \leq \epsilon$ .
|
| 139 |
+
|
| 140 |
+
Proof sketch. For a small enough perturbation, we can prove that the datasets' zonotopes are combinatorially equivalent. Hence their sets of feasible activation patterns will be exactly the same. Using the fact that any subset of a set in general position is also in general position, we can then show that the constrained convex optimization problem associated with each vertex is continuous with respect to the input dataset. Since the global minimum of the loss is just the minimum of the optimal loss for each vertex, its continuity follows from the fact that the composition of two continuous functions is continuous.
|
| 141 |
+
|
| 142 |
+
When the dataset $\mathcal{D}$ is not in general position, there are two possible ways in which breaking of nontrivial affine dependencies between examples can cause the global minimum of the loss to become discontinuous. The first is that the globally optimal vertex in the perturbed zonotope exists in the original zonotope, but its associated constrained convex optimization problem is discontinuous with respect to the dataset. This can happen when there are nontrivial affine dependencies that get broken amongst the active examples in the vertex. The second way is that the globally optimal vertex of the perturbed zonotope does not exist in the original zonotope. Geometrically, we can think of such a vertex as resulting from the breakdown of a non-parallelepiped higher dimension face (Gover, 2014). See appendix D for examples of these phenomena.
|
| 143 |
+
|
| 144 |
+
Analysis of Reductions We can use this characterization of the instabilities of the global optimum to perturbations in the training data to analyze the reductions of NP-hard problems used in Goel et al. (2020). We focus on the reduction of the NP-hard set cover problem to the optimization of
|
| 145 |
+
|
| 146 |
+

|
| 147 |
+
Figure 2: Comparison between gradient descent and optimization with a fixed random activation pattern. Left: Results for MSE on synthetic data for $d = 8$ and $m_{\mathrm{gen}} = 8$ . The overparameterization factor times $m_{\mathrm{gen}}$ equals the number of units in the trained network. Right: Results for accuracy on Fashion MNIST coat/pullover binary classification for $d = 16$ and $N = 700$ .
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
|
| 151 |
+
a single bias-free ReLU. In appendix E.2, we provide an explicit example of an arbitrarily small perturbation that results in the global minimum of the loss being independent of the solution to the set cover problem.
|
| 152 |
+
|
| 153 |
+
To the best of our knowledge, existing reductions of NP-hard problems to ReLU optimization all create datasets that are not in general position (Goel et al., 2020; Froese et al., 2021). Therefore, we present a modification of the set cover reduction that produces a dataset in general position. See appendix E.3 for details of this modification along with a proof that it is indeed a reduction of the set cover problem. We thus have the following statement.
|
| 154 |
+
|
| 155 |
+
Theorem 4.2. Optimizing a ReLU is NP-hard even when restricted to datasets in general position.
|
| 156 |
+
|
| 157 |
+
# 4.2 Polynomial Time Optimization via Overparameterization
|
| 158 |
+
|
| 159 |
+
Even though ReLU network optimization is NP-hard in general, it can be shown that overparameterization allows for gradient descent to converge to the global minimum in polynomial time (Du et al., 2018; Zou & Gu, 2019; Oymak & Soltanolkotabi, 2020; Allen-Zhu et al., 2019). This is not a contradiction since optimization of overparameterized ReLU networks is a strict subset of the set of all ReLU optimization problems.
|
| 160 |
+
|
| 161 |
+
The general proof method of these works usually involves demonstrating that overparameterization results in activation patterns not changing much throughout training. This allows gradient descent to effectively bypass the combinatorial search of the outer loop in algorithm 1. The remaining optimization problem can then be shown to be similar to the constrained convex optimization problem (3) by assuming that the second layer is frozen. Using the zonotope formalism, we can interpret these results as saying that a sufficiently large number of hidden units $m$ guarantees with high probability that a randomly chosen vertex corresponds to a region of parameter space containing a global minimum of the loss. Parameter initialization selects the random vertex in practice.
|
| 162 |
+
|
| 163 |
+
This can be justified theoretically through a connection to random feature models. Here we assume that the first layer is frozen, and the second layer forms a linear model over the random first layer features. As the number of units $m$ increases past the number of training examples $N$ , the set of first layer activations can become linearly independent. The probability of this approaches 1 as $m \to \infty$ . Whether all of the parameters within an activation region produce linearly dependent activations can be shown to depend solely on its activation pattern when the dataset is in general position.
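A quick numerical check of this random-features intuition is sketched below, under assumed Gaussian data and weights: as the number of units $m$ grows past $N$, the matrix of first-layer ReLU activations on the training set typically reaches full rank $N$, at which point a frozen first layer already suffices for the (now linear) second-layer fit.

```python
import numpy as np

rng = np.random.default_rng(0)
d, N = 8, 64
X = rng.standard_normal((d, N))
X_bar = np.vstack([X, np.ones((1, N))])

for m in [8, 32, 64, 128, 256]:
    W = rng.standard_normal((m, d + 1))          # random frozen first layer
    acts = np.maximum(W @ X_bar, 0.0)            # ReLU activations on the training set, shape (m, N)
    print(f"m={m:4d}  rank of activation matrix = {np.linalg.matrix_rank(acts)}")
```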
|
| 164 |
+
|
| 165 |
+
To test this, we ran experiments comparing batch gradient descent to solving (3) for a randomly chosen vertex on some toy datasets. We created synthetic datasets by first choosing the input dimension $d$ and a positive integer $m_{\mathrm{gen}}$ . To get the training examples, we sampled $N = (d + 1)m_{\mathrm{gen}}$ points in $\mathbb{R}^d$ i.i.d. from the standard Gaussian distribution. We then sampled the weights of a shallow ReLU network with $m_{\mathrm{gen}}$ units i.i.d. from the standard Gaussian distribution. We used this network to create the labels for our synthetic dataset. See appendix H.1.1 for details on the data generation process.
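A compressed sketch of this teacher-network recipe is shown below; it is only an approximation of the procedure (the exact generation details live in appendix H.1.1), and the seed, the Gaussian teacher second layer, and the bias handling are our assumptions.

```python
import numpy as np

def make_synthetic(d, m_gen, seed=0):
    """Teacher-generated regression data: N = (d+1)*m_gen Gaussian inputs labeled by a random shallow ReLU net."""
    rng = np.random.default_rng(seed)
    N = (d + 1) * m_gen
    X = rng.standard_normal((N, d))
    X_bar = np.hstack([X, np.ones((N, 1))])      # homogeneous coordinates
    W = rng.standard_normal((m_gen, d + 1))      # teacher first layer
    v = rng.standard_normal(m_gen + 1)           # teacher second layer (assumed Gaussian, with bias)
    acts = np.maximum(X_bar @ W.T, 0.0)          # shape (N, m_gen)
    y = acts @ v[:m_gen] + v[m_gen]
    return X, y

X, y = make_synthetic(d=8, m_gen=8)
```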
|
| 166 |
+
|
| 167 |
+
We also created toy binary classification datasets from MNIST (LeCun et al., 2010) and Fashion MNIST (Xiao et al., 2017) by choosing two classes, $5/9$ and coat/pullover, respectively, to differentiate. We used the first $d \in \{8, 16\}$ components of the PCA whitened data and selected $N \in \{350, 700\}$ examples for our training sets. See appendix H.2 for details.
|
| 168 |
+
|
| 169 |
+
We present some of our results in fig. 2. See appendix H for details of the training procedures and for results on more $d$ , $m_{\mathrm{gen}}$ and $d$ , $N$ pairs. On synthetic data, we see that the random vertex method finds a good solution for overparameterization factors of 4 and up. However, gradient descent tends
|
| 170 |
+
|
| 171 |
+
to arrive at reasonably good solutions for lower levels of overparameterization while the random vertex method fails. This was a general trend that we observed across different $d$ , $m_{\mathrm{gen}}$ pairs on the synthetic datasets and $d$ , $N$ pairs on the binary classification datasets. Note that the Fashion MNIST networks represented in fig. 2 were relatively underparameterized with the maximal size of 64 units being overparameterized by only a factor of about 1.5.
|
| 172 |
+
|
| 173 |
+
This demonstrates that gradient descent can perform some aspects of the combinatorial search over zonotope vertices. We hypothesize that the gradient tends to be smaller within activation regions with a good optimum and thus gradient descent is more likely to stay within a good activation region. Conversely, the larger gradients within activation regions with poor optima make it more likely that a gradient descent step will move the parameters out of those regions. We can thus think of gradient descent as performing a pseudo-annealing process over the vertices of the zonotope since the likelihood of moving from one vertex to another decreases as the parameters settle into better activation regions.
|
| 174 |
+
|
| 175 |
+
# 4.2.1 Tighter Bounds
|
| 176 |
+
|
| 177 |
+
We now introduce a novel vertex selection scheme that runs in polynomial time and requires minimal overparameterization. Suppose $\mathcal{D} = \{(\mathbf{x}_i, y_i)\}_{i=1}^N$ is a dataset in general position. Assume that the examples are ordered by the value of their last coordinate, which we suppose is unique WLOG (i.e. $\mathbf{e}_d^T\mathbf{x}_i < \mathbf{e}_d^T\mathbf{x}_j$ for $i < j$). If not provided in this format, this can be accomplished in $O(N\log N)$ time. We now split the dataset into $\lceil \frac{N}{d+1} \rceil$ chunks containing at most $d+1$ examples. We write each chunk as $\mathcal{D}_k = \{(\mathbf{x}_i, y_i)\}_{i=(k-1)(d+1)+1}^{\min(N,k(d+1))}$. Since each $\mathcal{D}_k$ contains a contiguous chunk of examples sorted along an axis in coordinate space, we see that we can always find a hyperplane separating $\mathcal{D}_k$ and $\mathcal{D}_{k'}$ for $k \neq k'$. For each $k = 1, \ldots, \lceil \frac{N}{d+1} \rceil$, we add two units to our ReLU network and assign them the activation pattern of 0 for examples belonging to a $\mathcal{D}_{k'}$ with $k' < k$ and 1 for the remaining examples. One of the units will be multiplied by +1 in the second layer while the other will be multiplied by -1. Hence the network contains a total of $2\lceil \frac{N}{d+1} \rceil$ hidden units. We prove in appendix F that a set of weights with that activation pattern exists such that the output of the network on training examples exactly matches their labels. The key idea in the proof is that we can sequentially fit the examples in the $k$-th chunk without undoing our progress in fitting the chunks before it.
|
| 178 |
+
|
| 179 |
+
Theorem 4.3. Given a dataset in $\mathbb{R}^d$ containing $N$ examples in general position, a shallow ReLU network containing $2\left\lceil \frac{N}{d + 1}\right\rceil$ hidden units can be found in polynomial time exactly fitting the dataset.
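The combinatorial half of this construction, i.e. picking the "staircase" vertex/activation pattern, can be sketched in a few lines; actually solving for weights that realize the pattern is the constrained fitting argument of appendix F and is not reproduced here.

```python
import numpy as np

def chunk_activation_pattern(X, d):
    """Return the staircase activation pattern used in the Theorem 4.3 construction.

    X: (N, d) data, assumed in general position with distinct last coordinates.
    Produces a (2*ceil(N/(d+1)), N) 0/1 pattern: the k-th pair of units is active exactly
    on the examples in chunk k and in all later chunks.
    """
    N = X.shape[0]
    order = np.argsort(X[:, -1])                  # sort examples by their last coordinate
    n_chunks = -(-N // (d + 1))                   # ceil(N / (d+1))
    A = np.zeros((2 * n_chunks, N), dtype=int)
    for k in range(n_chunks):
        active = order[k * (d + 1):]              # chunk k and everything after it
        A[2 * k, active] = 1                      # unit paired with a +1 second-layer weight
        A[2 * k + 1, active] = 1                  # unit paired with a -1 second-layer weight
    return A

A = chunk_activation_pattern(np.random.default_rng(0).standard_normal((20, 4)), d=4)
print(A.shape)   # (8, 20): 2*ceil(20/5) units, one column per example
```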
|
| 180 |
+
|
| 181 |
+
To the best of our knowledge, this is the tightest known bound on the amount of overparameterization needed to find the global optimum of a ReLU network in polynomial time. A simple argument comparing the number of unknowns and the number of equations demonstrates that we need at least $\frac{N}{d + 1}$ hidden units to exactly fit an arbitrary dataset with a shallow ReLU network. Hence our method uses only about twice as many hidden units as is necessary to fit the data. However, we emphasize that this ReLU optimization scheme is primarily of theoretical interest since we find in practice that the resulting ReLU network tends to be a very ill-conditioned function.
|
| 182 |
+
|
| 183 |
+
# 4.3 Relevance to Optimization in Practice
|
| 184 |
+
|
| 185 |
+
Practically all optimization of ReLU networks in practice uses some variant of gradient descent with an overparameterized network. As the degree of overparameterization goes down, gradient descent begins to arrive at increasingly suboptimal solutions (Nakkiran et al., 2021).
|
| 186 |
+
|
| 187 |
+
In section 4.2, we hypothesized how gradient descent can find activation regions containing good optima. However, the gradient of the loss is inherently a local property in parameter space while the space's decomposition into activation regions is inherently global. Boundaries between regions correspond to discontinuities in the gradient of the loss. We hypothesize that these properties lead to little direct information about the optimization problem being used to inform gradient descent's traversal over zonotope vertices. Hence we suspect that algorithms that explicitly traverse zonotope vertices using some loss-based criteria can outperform gradient descent in the underparameterized- to mildly-overparameterized regimes.
|
| 188 |
+
|
| 189 |
+
Algorithm 2 Greedy Local Search (GLS) Heuristic
|
| 190 |
+
|
| 191 |
+
Input: data $\mathcal{D} = \{\mathbf{x}_i, y_i\}_{i=1}^N$ , 2nd layer $\mathbf{v} \in \mathbb{R}^{m+1}$ , max steps $T \in \mathbb{N}$
$A_0 \in \mathrm{vert}(\mathcal{Z}^m)$
for $t \in \{0, \ldots, T\}$ do
    $A_{t+1} \gets A_t$
    for $A' \in \mathrm{neighbors}(A_t)$ do
        if $L^*(A'; \mathcal{D}) < L^*(A_{t+1}; \mathcal{D})$ then
            $A_{t+1} \gets A'$
        end if
    end for
    if $A_{t+1} = A_t$ then
        return $A_t$
    end if
end for
return $A_T$
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
Figure 3: Comparison between gradient descent and our GLS heuristics. Top: Results for MSE on synthetic data for $d = 4$ and $m_{\mathrm{gen}} = 2$ . The overparameterization factor times $m_{\mathrm{gen}}$ equals the number of units in the trained network. Bottom: Results for accuracy on Fashion MNIST coat/pullover binary classification for $d = 8$ and $N = 350$ .
|
| 211 |
+
|
| 212 |
+
# 4.3.1 Difficulty of Combinatorial Search
|
| 213 |
+
|
| 214 |
+
Unless $P = NP$ , we are unlikely to find an efficient algorithm to perform the combinatorial search in algorithm 1 for arbitrary datasets (Goel et al., 2020). However, this does not preclude the existence of heuristics that tend to work well on problems encountered in practice. We investigated this by using a greedy local search (GLS) over the graph formed by the zonotope's 1-skeleton. We start by selecting a vertex at random and find its corresponding optimal loss by solving a convex program. We iterate over its neighboring vertices and compute their optimal losses as well. We then move to the neighboring vertex with the lowest loss and repeat the process until we arrive at a vertex with lower loss than its neighbors. We then take that vertex's optimal parameters as our approximation to the global minimization problem. This algorithm is defined in detail in algorithm 2.
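A compressed Python sketch of the greedy step is given below. It assumes a helper `optimal_loss(A)` that solves problem (3) for pattern `A` (e.g. with the cvxpy snippet from section 2), and for simplicity it treats every single-entry flip of the pattern as a candidate neighbor; strictly speaking, only flips that yield feasible patterns correspond to edges of the zonotope's 1-skeleton.

```python
import numpy as np

def greedy_local_search(A0, optimal_loss, max_steps=100):
    """Greedy descent over activation patterns. A0: 0/1 integer array of shape (m, N)."""
    A = A0.copy()
    best = optimal_loss(A)
    for _ in range(max_steps):
        candidate, cand_loss = A, best
        for u in range(A.shape[0]):                  # unit index
            for i in range(A.shape[1]):              # example index
                B = A.copy()
                B[u, i] ^= 1                         # flip one entry: one step along a zonotope edge
                loss = optimal_loss(B)
                if loss < cand_loss:
                    candidate, cand_loss = B, loss
        if cand_loss >= best:                        # no neighbor improves: local optimum reached
            return A, best
        A, best = candidate, cand_loss               # move to the best neighboring vertex
    return A, best
```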
|
| 215 |
+
|
| 216 |
+
We also experimented with some additional heuristics that help the GLS converge faster by reducing the number of convex problems solved at each step. For example, we can greedily move to the first neighboring vertex encountered with a lower loss, which significantly decreases the time per step in the early stages of training. We can further improve this by using geometric information about a solution's relative location in its activation region to try certain vertices first. We call the algorithm with these heuristics modified greedy local search (mGLS) and define it in detail in appendix G.
|
| 217 |
+
|
| 218 |
+
Note that these heuristics are not guaranteed to return a global minimizer of the loss. Furthermore, the zonotope has exponentially many vertices, so there are no immediate guarantees that the heuristics take less than exponential time to run. However, each step takes polynomial time since each vertex has $O(mN)$ neighbors, so each step solves a polynomial number of convex optimization problems.
|
| 219 |
+
|
| 220 |
+
We ran experiments comparing these heuristics to gradient descent on toy datasets generated in the same way as in section 4.2. We used GLS on the synthetic data and mGLS on the MNIST and Fashion MNIST derived data. We present some of our results in fig. 3. See appendix H for details of the training procedures and for results for more values of $d$ , $m_{\mathrm{gen}}$ and $d$ , $N$ pairs. On synthetic data, the GLS heuristics significantly outperformed gradient descent. On binary classification tasks, mGLS outperformed gradient descent for networks with moderate levels of underparameterization and performed similarly otherwise. We observed a similar trend across the rest of the $d$ , $m_{\mathrm{gen}}$ and $d$ , $N$ pairs. These results support our hypothesis that gradient descent is suboptimal in the combinatorial search over activation patterns. Furthermore, they suggest that this combinatorial optimization might tend to be tractable in practice.
|
| 221 |
+
|
| 222 |
+
# 5 Related Work
|
| 223 |
+
|
| 224 |
+
Some of the concepts in this work also arise in Zhang et al. (2018). A key difference is that they analyze the activation regions in input space given a ReLU network with fixed parameters. We can, in fact, use their tropical geometric approach to derive our zonotope formalism for a single ReLU unit. To do so, the roles of the weights and data must be swapped; we instead use a fixed data matrix and a varying vector of weights, while they use a fixed weight matrix and a varying vector of data. Our use of a zonotope generated by the training examples, however, is novel. Misiakos et al. (2021) show that the approximation error between two shallow ReLU networks depends on the Hausdorff distance between the zonotopes generated by each network's units. Bach (2017) also uses a Hausdorff distance between zonotopes in the context of neural network optimization.
|
| 225 |
+
|
| 226 |
+
Goel et al. (2020) provide proofs of the NP-hardness of optimization of shallow ReLU networks and the hardness of even finding an approximate solution. Froese et al. (2021) extend these results and show that the brute force search in Arora et al. (2016) cannot be avoided in the worst case. Du et al. (2018) was one of the first works to prove that overparameterization in shallow ReLU networks allows gradient descent to converge to a global optimum in polynomial time. Their bound of $\Omega(N^6)$ on the number of hidden units needed for convergence was improved upon by subsequent work (Ji & Telgarsky, 2019; Daniely, 2019). For example, Oymak & Soltanolkotabi (2020) proved a bound of $\Omega(N^2 / d)$ .
|
| 227 |
+
|
| 228 |
+
Pilanci & Ergen (2020) and Wang et al. (2021) represent global optimization of shallow ReLU networks with $\ell_2$ regularization using a convex optimization problem that operates simultaneously over all activation patterns for a single unit. Multiple units are handled by summing over the activations with different activation patterns. This leads to exponential complexity in the data dimension $d$ but avoids exponential complexity in the number of units $m$ . Dey et al. (2020) provide an example of a heuristic algorithm that searches over activation patterns for a single ReLU unit. Their algorithm operates on the principle that examples with large positive labels are more likely to belong to the active set in good solutions.
# 6 Conclusion
We introduced a novel characterization of the combinatorial structure of activation patterns implicit in the optimization of shallow ReLU networks. We showed that it can be described as a Cartesian product of zonotopes generated by the training examples. We used this zonotope formalism to explore aspects of the optimization of shallow ReLU networks. It provides a natural way to describe instabilities of the global minimum to perturbations of the dataset. We then related this to work on the NP-hardness of global ReLU optimization. In particular, we demonstrated that this optimization problem is still NP-hard even when restricted to datasets in general position, which is commonly assumed of data in practice.
We then explored how combinatorial considerations play into the relationship between overparameterization and polynomial-time optimization of shallow ReLU networks. Namely, we interpret known results for gradient descent as stating that a randomly chosen zonotope vertex will be close to one whose activation region contains a good local optimum. We then provide empirical evidence that sufficient overparameterization makes it highly likely that a randomly chosen vertex has a good local optimum. We also provide a polynomial-time algorithm that can find a vertex containing the global optimum using approximately twice the minimum number of hidden units needed to fit the dataset exactly. Finally, we provide a GLS heuristic over zonotope vertices that outperforms gradient descent on some toy problems.
In future work we plan to theoretically and empirically explore heuristics and algorithms that perform well on real-world datasets. We hope to analyze how vertex choice impacts generalization. Further insights might be derived by exploring the connections of hyperplane arrangements to tropical geometry and oriented matroids (Stanley et al., 2004; Oxley, 2006; Maclagan & Sturmfels, 2015). One caveat of our theory is that it applies only to shallow ReLU networks. The concept of activation patterns remains meaningful for deep ReLU networks, but analyzing it requires real algebraic geometry (Basu, 2014; Bochnak et al., 2013). We hope that further research along these avenues will deepen our understanding of neural network training and enable improvements to training in practice.
# References
|
| 239 |
+
|
| 240 |
+
Agrawal, A., Amos, B., Barratt, S., Boyd, S., Diamond, S., and Kolter, Z. Differentiable convex optimization layers. arXiv preprint arXiv:1910.12430, 2019.
|
| 241 |
+
Allen-Zhu, Z., Li, Y., and Song, Z. A convergence theory for deep learning via over-parameterization. In International Conference on Machine Learning, pp. 242-252. PMLR, 2019.
|
| 242 |
+
Arora, R., Basu, A., Mianjy, P., and Mukherjee, A. Understanding deep neural networks with rectified linear units. arXiv preprint arXiv:1611.01491, 2016.
|
| 243 |
+
Bach, F. Breaking the curse of dimensionality with convex neural networks. The Journal of Machine Learning Research, 18(1):629-681, 2017.
|
| 244 |
+
Basu, S. Algorithms in real algebraic geometry: a survey. arXiv preprint arXiv:1409.1534, 2014.
|
| 245 |
+
Bochnak, J., Coste, M., and Roy, M.-F. Real algebraic geometry, volume 36. Springer Science & Business Media, 2013.
|
| 246 |
+
Daniely, A. Neural networks learning and memorization with (almost) no over-parameterization. arXiv preprint arXiv:1911.09873, 2019.
|
| 247 |
+
Dey, S. S., Wang, G., and Xie, Y. Approximation algorithms for training one-node relu neural networks. IEEE Transactions on Signal Processing, 68:6696-6706, 2020.
|
| 248 |
+
Diamond, S. and Boyd, S. Cvxpy: A python-embedded modeling language for convex optimization. The Journal of Machine Learning Research, 17(1):2909-2913, 2016.
|
| 249 |
+
Domahidi, A., Chu, E., and Boyd, S. Ecos: An socp solver for embedded systems. In 2013 European Control Conference (ECC), pp. 3071-3076. IEEE, 2013.
|
| 250 |
+
Du, S. S., Zhai, X., Poczos, B., and Singh, A. Gradient descent provably optimizes over-parameterized neural networks. arXiv preprint arXiv:1810.02054, 2018.
|
| 251 |
+
Froese, V., Hertrich, C., and Niedermeier, R. The computational complexity of relu network training parameterized by data dimensionality. arXiv preprint arXiv:2105.08675, 2021.
|
| 252 |
+
Glorot, X. and Bengio, Y. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the thirteenth international conference on artificial intelligence and statistics, pp. 249-256. JMLR Workshop and Conference Proceedings, 2010.
|
| 253 |
+
Goel, S., Klivans, A., Manurangsi, P., and Reichman, D. Tight hardness results for training depth-2 relu networks. arXiv preprint arXiv:2011.13550, 2020.
|
| 254 |
+
Govern, E. Congruence and metrical invariants of zonotopes. arXiv preprint arXiv:1401.4749, 2014.
|
| 255 |
+
Hornik, K. Approximation capabilities of multilayer feedforward networks. Neural networks, 4(2): 251-257, 1991.
|
| 256 |
+
Ji, Z. and Telgarsky, M. Polylogarithmic width suffices for gradient descent to achieve arbitrarily small test error with shallow relu networks. arXiv preprint arXiv:1909.12292, 2019.
|
| 257 |
+
Kaplan, J., McCandlish, S., Henighan, T., Brown, T. B., Chess, B., Child, R., Gray, S., Radford, A., Wu, J., and Amodei, D. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361, 2020.
|
| 258 |
+
LeCun, Y., Cortes, C., and Burges, C. Mnist handwritten digit database. ATT Labs [Online]. Available: http://yann.lecun.com/exdb/mnist, 2, 2010.
|
| 259 |
+
Maclagan, D. and Sturmfels, B. Introduction to tropical geometry, volume 161. American Mathematical Soc., 2015.
|
| 260 |
+
McMullen, P. On zonotopes. Transactions of the American Mathematical Society, 159:91-109, 1971.
|
| 261 |
+
|
| 262 |
+
Misiakos, P., Smyrnis, G., Retsinas, G., and Maragos, P. Neural network approximation based on Hausdorff distance of tropical zonotopes. In International Conference on Learning Representations, 2021.
|
| 263 |
+
Nakkiran, P., Kaplun, G., Bansal, Y., Yang, T., Barak, B., and Sutskever, I. Deep double descent: Where bigger models and more data hurt. Journal of Statistical Mechanics: Theory and Experiment, 2021(12):124003, 2021.
|
| 264 |
+
Oxley, J. G. Matroid theory, volume 3. Oxford University Press, USA, 2006.
|
| 265 |
+
Oymak, S. and Soltanolkotabi, M. Toward moderate overparameterization: Global convergence guarantees for training shallow neural networks. IEEE Journal on Selected Areas in Information Theory, 1(1):84-105, 2020.
|
| 266 |
+
Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., Thirion, B., Grisel, O., Blondel, M., Prettenhofer, P., Weiss, R., Dubourg, V., et al. Scikit-learn: Machine learning in python. the Journal of machine Learning research, 12:2825-2830, 2011.
|
| 267 |
+
Pilanci, M. and Ergen, T. Neural networks are convex regularizers: Exact polynomial-time convex optimization formulations for two-layer networks. In International Conference on Machine Learning, pp. 7695-7705. PMLR, 2020.
|
| 268 |
+
Richter-Gebert, J. and Ziegler, G. M. 6: Oriented matroids. Chapman and Hall/CRC, 2017.
|
| 269 |
+
Stanley, R. P. et al. An introduction to hyperplane arrangements. Geometric combinatorics, 13 (389-496):24, 2004.
|
| 270 |
+
Wang, Y., Lacotte, J., and Pilanci, M. The hidden convex optimization landscape of regularized two-layer relu networks: an exact characterization of optimal solutions. In International Conference on Learning Representations, 2021.
|
| 271 |
+
Xiao, H., Rasul, K., and Vollgraf, R. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv preprint arXiv:1708.07747, 2017.
|
| 272 |
+
Xu, Y. and Yin, W. A block coordinate descent method for regularized multiconvex optimization with applications to nonnegative tensor factorization and completion. SIAM Journal on imaging sciences, 6(3):1758-1789, 2013.
|
| 273 |
+
Zhang, L., Naitzat, G., and Lim, L.-H. Tropical geometry of deep neural networks. In International Conference on Machine Learning, pp. 5824-5832. PMLR, 2018.
|
| 274 |
+
Ziegler, G. M. Lectures on polytopes, volume 152. Springer Science & Business Media, 2012.
|
| 275 |
+
Zou, D. and Gu, Q. An improved analysis of training over-parameterized deep neural networks. arXiv preprint arXiv:1906.04688, 2019.
|
| 276 |
+
|
| 277 |
+
# Checklist
|
| 278 |
+
|
| 279 |
+
1. For all authors...
|
| 280 |
+
|
| 281 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 282 |
+
(b) Did you describe the limitations of your work? [Yes]
|
| 283 |
+
(c) Did you discuss any potential negative societal impacts of your work? [N/A] Our contributions are primarily theoretical.
|
| 284 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 285 |
+
|
| 286 |
+
2. If you are including theoretical results...
|
| 287 |
+
|
| 288 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes]
|
| 289 |
+
(b) Did you include complete proofs of all theoretical results? [Yes] We provide proofs in the appendix.
|
| 290 |
+
|
| 291 |
+
3. If you ran experiments...
|
| 292 |
+
|
| 293 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] We have included this in the supplemental material.
|
| 294 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
|
| 295 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] We include standard deviations for all results in the tables.
|
| 296 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No]
|
| 297 |
+
|
| 298 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 299 |
+
|
| 300 |
+
(a) If your work uses existing assets, did you cite the creators? [Yes] We cite the datasets and software libraries used.
|
| 301 |
+
(b) Did you mention the license of the assets? [No]
|
| 302 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [No]
|
| 303 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [No] The datasets used (MNIST, Fashion MNIST) are standard in machine learning.
|
| 304 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [No] They are widely used datasets with pictures of handwritten digits and items of clothing.
|
| 305 |
+
|
| 306 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 307 |
+
|
| 308 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 309 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 310 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
acombinatorialperspectiveontheoptimizationofshallowrelunetworks/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:13106be8ec90b7a2d466853212a32949b3903eef43294faeaae09f531fedf5d2
size 120496

acombinatorialperspectiveontheoptimizationofshallowrelunetworks/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e78ad60e7dd8321e254467d8fec3d3a10e23311f8ca1242507e9919885efc4be
size 428855

acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0cddbfbc852e97d32f604bd4122f3a5b2f6c0cd78a822bc93b1709e20801742
size 86671

acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7218d0ae55ede8d8b17202ed3057086179bba0790106c804af6eb66b3e8e0ab6
size 112092

acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/782b67b1-2c1a-418c-8e65-adfd5e280fda_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e514f036c2917103f8fe2c1794dfef87313bfb99bdc9128835bc913697299c8a
size 412863

acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/full.md
ADDED
@@ -0,0 +1,462 @@
# A Communication-efficient Algorithm with Linear Convergence for Federated Minimax Learning
|
| 2 |
+
|
| 3 |
+
Zhenyu Sun
|
| 4 |
+
|
| 5 |
+
Department of Electrical and Computer Engineering
|
| 6 |
+
|
| 7 |
+
Northwestern University
|
| 8 |
+
|
| 9 |
+
Evanston, IL 60208
|
| 10 |
+
|
| 11 |
+
zhenyusun2026@u.northwestern.edu
|
| 12 |
+
|
| 13 |
+
Ermin Wei
|
| 14 |
+
|
| 15 |
+
Department of Electrical and Computer Engineering
|
| 16 |
+
|
| 17 |
+
Northwestern University
|
| 18 |
+
|
| 19 |
+
Evanston, IL 60208
|
| 20 |
+
|
| 21 |
+
ermin.wei@northwestern.edu
|
| 22 |
+
|
| 23 |
+
# Abstract
|
| 24 |
+
|
| 25 |
+
In this paper, we study a large-scale multi-agent minimax optimization problem, which models many interesting applications in statistical learning and game theory, including Generative Adversarial Networks (GANs). The overall objective is a sum of agents' private local objective functions. We focus on the federated setting, where agents can perform local computation and communicate with a central server. Most existing federated minimax algorithms either require communication per iteration or lack performance guarantees with the exception of Local Stochastic Gradient Descent Ascent (SGDA), a multiple-local-update descent ascent algorithm which guarantees convergence under a diminishing stepsize. By analyzing Local SGDA under the ideal condition of no gradient noise, we show that generally it cannot guarantee exact convergence with constant step sizes and thus suffers from slow rates of convergence. To tackle this issue, we propose FedGDA-GT, an improved Federated (Fed) Gradient Descent Ascent (GDA) method based on Gradient Tracking (GT). When local objectives are Lipschitz smooth and strongly-convex-strongly-concave, we prove that FedGDA-GT converges linearly with a constant stepsize to global $\epsilon$ -approximation solution with $\mathcal{O}(\log(1/\epsilon))$ rounds of communication, which matches the time complexity of centralized GDA method. Then, we analyze the general distributed minimax problem from a statistical aspect, where the overall objective approximates a true population minimax risk by empirical samples. We provide generalization bounds for learning with this objective through Rademacher complexity analysis. Finally, we numerically show that FedGDA-GT outperforms Local SGDA.
|
| 26 |
+
|
| 27 |
+
# 1 Introduction
|
| 28 |
+
|
| 29 |
+
In recent years, minimax learning theory has achieved significant success in attaching relevance to many modern machine learning and statistical learning frameworks, including Generative Adversarial Networks (GANs) [1-3], reinforcement learning [4], adversarial training [5, 6], robust estimation and optimization [7-11], and domain adaptation [12, 13]. Generally, a minimax learning problem is modeled as a game between two players with opposite goals, i.e., one minimizes the objective while the other maximizes it.
|
| 30 |
+
|
| 31 |
+
Most current studies in machine learning aim to understand the minimax problem in terms of the speed of convergence and the accuracy of fixed points. In the centralized setting, gradient descent ascent (GDA), an extension of gradient descent (GD), stands out for its simple implementation: at each iteration, the "min" player takes a gradient descent step over its decision variable while the "max" player takes a gradient ascent step. Due to the huge volume of data, stochastic gradient descent ascent (SGDA) is preferred in machine learning settings. Theoretical guarantees are well established for GDA and SGDA in [14, 15]. However, in practice, concerns about computation efficiency and data privacy have driven the development of federated learning over a server-client topology and distributed learning over a general graph. These methods often require communication with the server or neighbors at each iteration [13, 16, 17] and are inapplicable to scenarios where communication is expensive.
|
| 32 |
+
|
| 33 |
+
In this work, we focus on solving a minimax problem in the Federated Learning (FL) setting with one server and multiple clients/agents. In FL, the server hands over the computation burden to the agents, which run training algorithms on their local data. The locally trained models are then reported to the server for aggregation, and this process is repeated with periodic communication. Much existing literature in FL, however, focuses on minimization problems [18-22]. The limited literature on federated minimax problems either lacks theoretical guarantees or requires frequent communication [13, 23, 24], with the exception of Local Stochastic Gradient Descent Ascent (SGDA) [25]. In Local SGDA, each agent (or client) performs multiple steps of stochastic gradient descent ascent before communicating with the server, which then aggregates the local models by averaging. Under careful selection of diminishing learning rates, [25, 26] show that Local SGDA converges to the global optimal solution sub-linearly. However, as we show here, when we try to improve the speed of convergence and reduce communication overhead by introducing a constant stepsize to Local SGDA, it fails to converge to the exact optimal solution, even when full gradients are used. Thus Local SGDA can either be fast (with little communication) but inaccurate, or accurate but slow (with much communication). To address this tradeoff between model accuracy and communication efficiency, we develop FedGDA-GT, a Federated Gradient Descent Ascent method based on Gradient Tracking, and show that it achieves fast linear convergence while preserving accuracy.
|
| 34 |
+
|
| 35 |
+
In addition to solving the minimax problem, we also study the generalization performance of distributed minimax problems in statistical learning, which measures the influence of sampling on the trained model. In most existing works, generalization analysis is established in the context of empirical risk minimization (ERM) [27, 29]. It is well-known that for generic loss functions, the learning error for centralized ERM is on the order of $\mathcal{O}(1 / \sqrt{N})$ with $N$ denoting the total number of training samples. Recently, several works derive generalization bounds on centralized minimax learning problems with the same order [30-32]. For generalization analysis in distributed minimax learning, learning bounds are only provided for specific scenarios, e.g., agnostic federated learning [13] and multiple-source domain adaptation [12]. In this paper, we provide generalization bounds for distributed empirical minimax learning with the same order as results of centralized cases, generalizing the results in [13].
|
| 36 |
+
|
| 37 |
+
# 1.1 Related work
|
| 38 |
+
|
| 39 |
+
Centralized minimax learning Historically, minimax problems have attracted researchers' attention for several decades. An early instantiation is the bilinear minimax problem, which, together with von Neumann's theorem [33], became a milestone in game theory. A simple algorithm was then proposed to solve this bilinear problem efficiently [34]. [35] generalizes von Neumann's theorem to convex-concave games, which triggered an explosion of algorithmic research [36-38]. GDA, one of the most widely used algorithms, is notable for its simple implementation. It is well known that GDA achieves an $\epsilon$-approximation solution in $\mathcal{O}(\log(1/\epsilon))$ iterations for strongly-convex-strongly-concave games and in $\mathcal{O}(\epsilon^{-2})$ iterations for convex-concave games under diminishing step sizes [14]. Very recently, nonconvex-nonconcave minimax problems have become a main focus in optimization and machine learning, due to the emergence of GANs; several related works are listed therein [39-43].
|
| 40 |
+
|
| 41 |
+
Distributed and federated minimax learning A few recent studies are devoted to distributed minimax problems due to the increasing volume of data and concerns on privacy and security. Algorithm design and convergence behaviors are extensively studied for minimax problems in the context of distributed optimization, where communication is required at each iteration [44-47].
|
| 42 |
+
|
| 43 |
+
In the federated setting, [48] proposes a multiple-local-update algorithm to deal with the distribution shift issue. [49] studies federated adversarial training under nonconvex-PL objectives. FedGAN is proposed in [50] to train GANs in a communication-efficient way. However, these works target specific scenarios. Very recently, aiming to solve general federated minimax problems, [25] proposed Local SGDA, which lets each agent perform multiple steps of GDA before communication; the authors also prove sub-linear convergence for Local SGDA under diminishing step sizes. Their convergence guarantees were later improved by [26] to match the results of centralized SGDA [15]. However, we note that all these algorithms require diminishing learning rates to obtain exact solutions and hence suffer from relatively slow convergence, whereas our algorithm allows constant step sizes and achieves linear convergence.
|
| 44 |
+
|
| 45 |
+
Generalization of minimax learning. Recently, generalization properties of minimax learning problems have been widely studied in different scenarios, including GANs and adversarial training. For GANs, [51] analyzes the generalization performance when discriminators have restricted approximability. [52] evaluates generalization bounds under different metrics. In contrast, [53] suggests a dilemma in GANs' generalization properties. In the context of adversarial training, generalization is studied through Rademacher complexity analysis [54, 57], function transformation [55], and margin-based approaches [56]. [58] studies generalization bounds for convex-concave objective functions with Lipschitz continuity. However, all these works are in the centralized setting. Recently, [13] provided generalization analysis for agnostic federated learning, where the objective is optimized for any target distribution formed by a mixture of agents' distributions. Our work extends their generalization analysis to general distributed minimax learning problems.
|
| 46 |
+
|
| 47 |
+
# 1.2 Our Contributions.
|
| 48 |
+
|
| 49 |
+
We summarize our main contributions as follows: (1) In federated setting, characterizing the behavior of fixed points of Local SGDA, which reveals the impact of objective heterogeneity and multiple local updates on the model accuracy [see Section 3.1]; (2) Resolving the tradeoff between model accuracy and communication efficiency by developing a linear-rate federated minimax algorithm that guarantees exact convergence [see Section 3.2]; (3) Analyzing the generalization properties of empirical minimax learning in distributed settings through Rademacher complexity analysis [see Section 4]; (4) Providing numerical results which suggest communication efficiency of our algorithm compared to Local SGDA and centralized GDA [see Section 5].
|
| 50 |
+
|
| 51 |
+
Notations. In this paper, we let $\| \cdot \|$ denote the $l_{2}$-norm and $|\cdot|$ denote the cardinality of a set or collection, or the absolute value of a scalar. Vectors are column vectors by default and $z = (x,y)$ denotes the concatenated vector $z = [x^T, y^T]^T$. Vectors and scalars for agent $i$ are denoted with subscript $i$, e.g., $f_i(x,y)$. Superscripts, e.g., $t$, denote iteration indices. We denote the gradient of $f(x,y)$ by $\nabla f(x,y) = (\nabla_x f(x,y), \nabla_y f(x,y))$, where $\nabla_x f(x,y)$ and $\nabla_y f(x,y)$ are the gradients with respect to $x$ and $y$, respectively.
|
| 52 |
+
|
| 53 |
+
# 2 Problem Setup
|
| 54 |
+
|
| 55 |
+
In this paper, we consider the general constrained minimax distributed optimization problem collectively solved by $m$ agents:
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
\min _ {x \in \mathcal {X}} \max _ {y \in \mathcal {Y}} \left\{f (x, y) := \frac {1}{m} \sum_ {i = 1} ^ {m} f _ {i} (x, y) \right\}, \tag {1}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
where $\mathcal{X},\mathcal{Y}$ are compact feasible sets contained in $\mathbb{R}^p$ and $\mathbb{R}^q$ respectively, $x\in \mathcal{X}$ is a $p$-dimensional vector, $y\in \mathcal{Y}$ is a $q$-dimensional vector, and $f_{i}(\cdot ,\cdot)$ is the local objective function of agent $i$, for all $i = 1,\dots,m$.
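As a concrete (and entirely hypothetical) instance of (1), which the algorithm sketches later in the paper can reuse, the snippet below builds $m$ strongly-convex-strongly-concave quadratic local objectives and their gradient oracles; the specific quadratics and all names are our own choices, not an example from the paper.

```python
import numpy as np

rng = np.random.default_rng(0)
m, p, q = 4, 5, 5                    # number of agents, dimensions of x and y

# Hypothetical local objectives f_i(x, y) = (c_i/2)||x - a_i||^2 - (c_i/2)||y - b_i||^2,
# which are c_i-strongly-convex in x and c_i-strongly-concave in y.
c = rng.uniform(0.5, 2.0, size=m)
a = rng.normal(size=(m, p))
b = rng.normal(size=(m, q))

def grad_fi(i):
    """Gradient oracle of agent i: returns (grad_x f_i, grad_y f_i) at (x, y)."""
    return lambda x, y: (c[i] * (x - a[i]), -c[i] * (y - b[i]))

grads = [grad_fi(i) for i in range(m)]   # list of per-agent gradient oracles
```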
|
| 62 |
+
|
| 63 |
+
Solving (1) is equivalent to finding a minimax point of $f(x,y)$ , defined as follows:
|
| 64 |
+
|
| 65 |
+
Definition 1. The point $(x^{*},y^{*})$ is said to be a minimax point of $f(x,y)$ if
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
f (x ^ {*}, y) \leq f (x ^ {*}, y ^ {*}) \leq \max _ {y ^ {\prime} \in \mathcal {Y}} f (x, y ^ {\prime}), \forall x \in \mathcal {X}, y \in \mathcal {Y}.
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
The first-order necessary condition for minimax points is given by the following lemma.
|
| 72 |
+
|
| 73 |
+
Lemma 1. [59] Assume $f$ is continuously differentiable. Then, any minimax point $(x^{*},y^{*})$ in the interior of $\mathcal{X}\times \mathcal{Y}$ satisfies
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\nabla_ {x} f (x ^ {*}, y ^ {*}) = \nabla_ {y} f (x ^ {*}, y ^ {*}) = 0.
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
# 3 FedGDA-GT: A linear-rate algorithm for federated minimax learning
|
| 80 |
+
|
| 81 |
+
In this section, we focus on solving (1) in the federated setting, where the $m$ agents are connected to a central server. In general, the agents' communication with the server is more expensive than local computation. We show that existing methods either require a large amount of communication (i.e., their convergence rate is sublinear) or only converge to an inexact solution at a linear rate. This suggests a tradeoff between model accuracy and communication efficiency. Motivated by this phenomenon, we aim to develop a communication-efficient minimax algorithm with linear convergence that preserves model accuracy and low communication overhead simultaneously.
|
| 82 |
+
|
| 83 |
+
We adopt the following standard assumptions on the problem.
|
| 84 |
+
|
| 85 |
+
Assumption 1 ( $\mu$ -strongly-convex-strongly-concave). For any $i = 1, \dots, m$ , $f_i(x, y)$ is twice-differentiable and is $\mu$ -strongly-convex-strongly-concave with some $\mu > 0$ for any $(x, y) \in \mathbb{R}^p \times \mathbb{R}^q$ , i.e.,
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\text{for any given } y, \qquad f_{i}(z, y) \geq f_{i}(x, y) + \langle \nabla_{x} f_{i}(x, y), z - x \rangle + \frac{\mu}{2} \| z - x \|^{2}, \quad \forall z, x,
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\text{for any given } x, \qquad f_{i}(x, z) \leq f_{i}(x, y) + \langle \nabla_{y} f_{i}(x, y), z - y \rangle - \frac{\mu}{2} \| z - y \|^{2}, \quad \forall z, y.
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
Assumption 2 (L-smoothness). There exists some $L > 0$ such that for any $i = 1, \ldots, m$ , $\| \nabla f_i(x, y) - \nabla f_i(x', y') \| \leq L \| (x, y) - (x', y') \|, \forall (x, y), (x', y') \in \mathbb{R}^p \times \mathbb{R}^q$ .
|
| 96 |
+
|
| 97 |
+
We note that although each $f_{i}(x,y)$ may have different $\mu_{i}$ and $L_{i}$ , we can set $\mu = \min_{i=1,\dots,m} \mu_{i}$ and $L = \max_{i=1,\dots,m} L_{i}$ to ensure Assumptions 1 and 2 hold.
|
| 98 |
+
|
| 99 |
+
# 3.1 Analysis on Local SGDA
|
| 100 |
+
|
| 101 |
+
We first study Local Stochastic Gradient Descent Ascent (SGDA) proposed in [25], which is the only known method for general federated minimax problems that has convergence guarantees and can utilize multiple local computation steps before communicating with the central server. In particular, in each iteration of Local SGDA, each agent updates its local model $(x_{i},y_{i})$ for $K$ steps using local stochastic gradients, and then sends its local model to the server, which computes the average of the local models and sends it back.
|
| 102 |
+
|
| 103 |
+
In [25], the authors prove that under Assumptions 1-2, with bounded variance assumption on all local gradients and vanishing learning rate, after $T$ rounds of communication, the convergence result of Local SGDA is
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\mathbb {E} \left[ \| x ^ {(T)} - x ^ {*} \| ^ {2} + \| y ^ {(T)} - y ^ {*} \| ^ {2} \right] \leq \mathcal {O} (T ^ {- 1}) + \mathcal {O} (T ^ {- 3}). \tag {2}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
This shows the slow sublinear convergence rate of this method, which translates to a large amount of communication. We next consider an ideal deterministic version of Local SGDA (Algorithm 1), where local stochastic gradients are replaced with full gradients.
|
| 110 |
+
|
| 111 |
+
# Algorithm 1 Local SGDA
|
| 112 |
+
|
| 113 |
+
Input: $(x^0,y^0)$ as initialization of the server
1: for $t = 0, 1, \ldots, T$ do
2:   $x_{i,0}^{t+1} = x^{t}$, $y_{i,0}^{t+1} = y^{t}$, $\forall i = 1,\dots,m$
3:   for $k = 0,1,\ldots,K-1$ do (in parallel for all agents)
4:     $x_{i,k+1}^{t+1} = x_{i,k}^{t+1} - \eta_{x} \nabla_{x} f_{i}\left(x_{i,k}^{t+1}, y_{i,k}^{t+1}\right)$
5:     $y_{i,k+1}^{t+1} = y_{i,k}^{t+1} + \eta_{y} \nabla_{y} f_{i}\left(x_{i,k}^{t+1}, y_{i,k}^{t+1}\right)$
6:   end for
7:   $x^{t+1} = \frac{1}{m}\sum_{i=1}^{m} x_{i,K}^{t+1}$, $y^{t+1} = \frac{1}{m}\sum_{i=1}^{m} y_{i,K}^{t+1}$
8: end for
Output: $(x^{T},y^{T})$ given by the server
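For concreteness, the following is a minimal numpy sketch of Algorithm 1 with full gradients (the deterministic setting analyzed here). The gradient oracles `grads[i]` are assumed to return $(\nabla_x f_i, \nabla_y f_i)$, e.g., the toy oracles from the earlier sketch; nothing here is the authors' code.

```python
import numpy as np

def local_gda(grads, x0, y0, eta_x, eta_y, K, T):
    """Deterministic Local SGDA (Algorithm 1): K local descent/ascent steps, then server averaging.

    grads: list of callables, grads[i](x, y) -> (grad_x f_i(x, y), grad_y f_i(x, y)).
    """
    m = len(grads)
    x, y = np.array(x0, dtype=float), np.array(y0, dtype=float)
    for _ in range(T):                               # T communication rounds
        xs, ys = [], []
        for i in range(m):                           # in practice this loop runs in parallel
            xi, yi = x.copy(), y.copy()
            for _ in range(K):                       # K local GDA steps from the shared iterate
                gx, gy = grads[i](xi, yi)
                xi, yi = xi - eta_x * gx, yi + eta_y * gy
            xs.append(xi)
            ys.append(yi)
        x, y = np.mean(xs, axis=0), np.mean(ys, axis=0)   # server averages the local models
    return x, y
```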
|
| 140 |
+
|
| 141 |
+
In order to rewrite Local SGDA in a concise form, we define the operator for gradient descent evaluated at $(x,y)$ for agent $i$ as
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\mathcal {D} _ {i} ^ {1} (x, y) = x - \eta_ {x} \nabla_ {x} f _ {i} (x, y), \forall i = 1, \dots , m.
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
Given some $k \geq 1$, let $\mathcal{D}_i^k$ denote the composition of $k$ copies of the $\mathcal{D}_i^1$ operator, and let $\mathcal{D}_i^0(x,y) = x$ be the identity on the first argument. Let $\mathcal{A}_i^1(x,y) = y + \eta_y\nabla_y f_i(x,y)$ be the gradient ascent operator, with $\mathcal{A}_i^k(x,y)$ and $\mathcal{A}_i^0(x,y) = y$ defined analogously. Then, given an initial point $(x^0,y^0)$, Algorithm 1 can be rewritten as the following recursion for $t = 0,1,\ldots,T$:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\tilde {x} _ {i} ^ {t + 1} = \mathcal {D} _ {i} ^ {K} (x ^ {t}, y ^ {t}), \quad \tilde {y} _ {i} ^ {t + 1} = \mathcal {A} _ {i} ^ {K} (x ^ {t}, y ^ {t}), \forall i = 1, \ldots , m
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
x ^ {t + 1} = \frac {1}{m} \sum_ {i = 1} ^ {m} \tilde {x} _ {i} ^ {t + 1}, \quad y ^ {t + 1} = \frac {1}{m} \sum_ {i = 1} ^ {m} \tilde {y} _ {i} ^ {t + 1}. \tag {3}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
The following result shows that Local SGDA generally cannot guarantee the convergence to the optimal solution of (1) with fixed step sizes even with full gradients.
|
| 158 |
+
|
| 159 |
+
Proposition 1. Suppose $f_{i}$ is differentiable, $\forall i = 1,\dots ,m$ . For any $K\geq 1$ , let $\{(x^{t},y^{t})\}$ be the sequence generated by (3) (or equivalently by Algorithm 1). If $\{(x^t,y^t)\}$ converges to a fixed point $(x^{*},y^{*})$ , then
|
| 160 |
+
|
| 161 |
+
$$
|
| 162 |
+
\frac {1}{m} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla f _ {i} \left(\mathcal {D} _ {i} ^ {k} \left(x ^ {*}, y ^ {*}\right), \mathcal {A} _ {i} ^ {k} \left(x ^ {*}, y ^ {*}\right)\right) = 0.
|
| 163 |
+
$$
|
| 164 |
+
|
| 165 |
+
Note that when $K = 1$ , Local SGDA reduces to GDA and the fixed point $(x^{*},y^{*})$ satisfies
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\frac {1}{m} \sum_ {i = 1} ^ {m} \nabla_ {x} f _ {i} (x ^ {*}, y ^ {*}) = \frac {1}{m} \sum_ {i = 1} ^ {m} \nabla_ {y} f _ {i} (x ^ {*}, y ^ {*}) = 0
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
which meets the optimality condition (by Lemma 1) for a minimax point of $f(x,y)$. For any other $K$, the fixed-point characterization generally differs from that of the minimax point. An illustrative example is provided in Appendix C.
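As an additional quick sanity check (our own toy example, separate from the one in Appendix C), consider scalar, decoupled local objectives $f_i(x,y) = \frac{c_i}{2}(x-a_i)^2 - \frac{c_i}{2}(y-a_i)^2$ with heterogeneous curvatures $c_i > 0$. One local descent step maps $x \mapsto (1-\eta_x c_i)x + \eta_x c_i a_i$, so $K$ steps give $\mathcal{D}_i^K(x,y) = (1-\eta_x c_i)^K x + \left(1-(1-\eta_x c_i)^K\right) a_i$, and the fixed point of (3) satisfies

$$
x^{*} = \frac{\sum_{i=1}^{m}\left(1-(1-\eta_x c_i)^{K}\right) a_i}{\sum_{i=1}^{m}\left(1-(1-\eta_x c_i)^{K}\right)},
$$

whereas the $x$-component of the minimax point of $f$ is $\sum_i c_i a_i / \sum_i c_i$. The two coincide for $K = 1$ (the weights reduce to $\eta_x c_i$), but for $K \geq 2$ they differ in general whenever the $c_i$ and $a_i$ are heterogeneous, consistent with Proposition 1; the $y$-component behaves symmetrically.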
|
| 172 |
+
|
| 173 |
+
We mention that while having multiple local gradient steps in the inner loop allows the agents to reduce the frequency of communication, it also pulls the agents towards their local minimax points as opposed to the global one. Motivated by this observation, we next propose a modification involving a gradient correction term, also known as gradient tracking (GT), to make sure the agents are moving towards the global minimax point.
|
| 174 |
+
|
| 175 |
+
# 3.2 FedGDA-GT and convergence guarantees
|
| 176 |
+
|
| 177 |
+
As indicated in Section 3.1, Local SGDA cannot guarantee exact convergence under fixed step sizes, which implies a tradeoff between communication efficiency and model accuracy. Aiming to resolve this issue, in this section we introduce our algorithm FedGDA-GT, formally described in Algorithm 2, which reaches the optimal solution of (1) with fixed step sizes and full gradients under the following assumption.
|
| 178 |
+
|
| 179 |
+
Assumption 3. The feasible sets $\mathcal{X}$ and $\mathcal{Y}$ are compact and convex. Moreover, there is at least one minimax point of $f(x,y)$ lying in $\mathcal{X} \times \mathcal{Y}$ .
|
| 180 |
+
|
| 181 |
+
$\operatorname{Proj}_{\mathcal{X}}(\cdot)$ and $\operatorname{Proj}_{\mathcal{Y}}(\cdot)$ denote the projection operators, i.e.,
|
| 182 |
+
|
| 183 |
+
$$
|
| 184 |
+
\operatorname{Proj}_{\mathcal{X}}(x) = \arg \min_{z \in \mathcal{X}} \| z - x \| \quad \text{and} \quad \operatorname{Proj}_{\mathcal{Y}}(y) = \arg \min_{z \in \mathcal{Y}} \| z - y \|,
|
| 185 |
+
$$
|
| 186 |
+
|
| 187 |
+
which are well defined by the previous assumption and are used to guarantee that the output of Algorithm 2 is feasible. The main differences between FedGDA-GT and Local SGDA are the correction terms $-\nabla_{x}f_{i}(x^{t},y^{t}) + \nabla_{x}f(x^{t},y^{t})$ and $-\nabla_{y}f_{i}(x^{t},y^{t}) + \nabla_{y}f(x^{t},y^{t})$, which track the differences between local and global gradients. With these gradient correction terms, when $(x_{i,k}^{t+1},y_{i,k}^{t+1})$ is not too far from $(x^{t},y^{t})$, we can expect $\nabla_{x}f_{i}(x_{i,k}^{t+1},y_{i,k}^{t+1}) \approx \nabla_{x}f_{i}(x^{t},y^{t})$, and similarly for the $y$ gradients. Hence the updates approximately reduce to taking global gradient descent and ascent steps, which coincide with centralized GDA updates and thus have the correct fixed points. We next show the convergence guarantee of FedGDA-GT.
|
| 188 |
+
|
| 189 |
+
We first establish the uniqueness of minimax point of $f(x,y)$ by the following lemma.
|
| 190 |
+
|
| 191 |
+
Algorithm 2 FedGDA-GT

Input: $(x^0,y^0)$ as initialization of the global model
1: for $t = 0,1,\ldots,T$ do
2:   Server broadcasts $(x^{t},y^{t})$
3:   Agents compute $(\nabla_{x}f_{i}(x^{t},y^{t}),\nabla_{y}f_{i}(x^{t},y^{t}))$ and send it to the server
4:   Server computes $(\nabla_{x}f(x^{t},y^{t}),\nabla_{y}f(x^{t},y^{t}))$ and broadcasts it
5:   Each agent $i$, for $i = 1,\dots,m$, sets $x_{i,0}^{t+1} = x^{t}$, $y_{i,0}^{t+1} = y^{t}$
6:   for $k = 0,1,\dots,K-1$ do (in parallel for all agents)
7:     $x_{i,k+1}^{t+1} = x_{i,k}^{t+1} - \eta (\nabla_{x}f_{i}(x_{i,k}^{t+1},y_{i,k}^{t+1}) - \nabla_{x}f_{i}(x^{t},y^{t}) + \nabla_{x}f(x^{t},y^{t}))$
8:     $y_{i,k+1}^{t+1} = y_{i,k}^{t+1} + \eta (\nabla_{y}f_{i}(x_{i,k}^{t+1},y_{i,k}^{t+1}) - \nabla_{y}f_{i}(x^{t},y^{t}) + \nabla_{y}f(x^{t},y^{t}))$
9:   end for
10:  All agents send $(x_{i,K}^{t+1},y_{i,K}^{t+1})$ to the server, which computes $(x^{t+1},y^{t+1})$ by
11:  $x^{t+1} = \mathrm{Proj}_{\mathcal{X}}\left(\frac{1}{m}\sum_{i = 1}^{m}x_{i,K}^{t+1}\right)$, $y^{t+1} = \mathrm{Proj}_{\mathcal{Y}}\left(\frac{1}{m}\sum_{i = 1}^{m}y_{i,K}^{t+1}\right)$
12: end for
Output: $(x^T,y^T)$ given by the server
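Below is a matching numpy sketch of Algorithm 2 with full gradients; for brevity the projections onto $\mathcal{X}$ and $\mathcal{Y}$ are omitted (i.e., the iterates are assumed to stay in the interior), and the gradient oracles are the same placeholders as before.

```python
import numpy as np

def fedgda_gt(grads, x0, y0, eta, K, T):
    """FedGDA-GT (Algorithm 2) with full gradients; Proj_X and Proj_Y omitted for simplicity.

    grads: list of callables, grads[i](x, y) -> (grad_x f_i(x, y), grad_y f_i(x, y)).
    """
    m = len(grads)
    x, y = np.array(x0, dtype=float), np.array(y0, dtype=float)
    for _ in range(T):
        local = [g(x, y) for g in grads]                       # agents report local gradients
        gx_bar = np.mean([gx for gx, _ in local], axis=0)      # server forms the global gradient
        gy_bar = np.mean([gy for _, gy in local], axis=0)
        xs, ys = [], []
        for i, g in enumerate(grads):
            xi, yi = x.copy(), y.copy()
            gx0, gy0 = local[i]
            for _ in range(K):                                 # corrected local updates
                gx, gy = g(xi, yi)
                xi = xi - eta * (gx - gx0 + gx_bar)
                yi = yi + eta * (gy - gy0 + gy_bar)
            xs.append(xi)
            ys.append(yi)
        # In the constrained case the averages would be projected onto X and Y here.
        x, y = np.mean(xs, axis=0), np.mean(ys, axis=0)
    return x, y
```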
|
| 206 |
+
|
| 207 |
+
Lemma 2. Under Assumptions 1 and 3, $(x^{*},y^{*})$ is the unique minimax point of $f(x,y)$ if and only if
|
| 208 |
+
|
| 209 |
+
$$
|
| 210 |
+
\nabla_ {x} f (x ^ {*}, y ^ {*}) = \nabla_ {y} f (x ^ {*}, y ^ {*}) = 0.
|
| 211 |
+
$$
|
| 212 |
+
|
| 213 |
+
Next, we state the main convergence result of our algorithm.
|
| 214 |
+
|
| 215 |
+
Theorem 1. Suppose Assumptions 1, 2, 3 are satisfied. Let $\{(x^t,y^t)\}_{t = 0}^{\infty}$ be a sequence generated by Algorithm 2. Then there exists a scalar $\eta_0 > 0$ , such that for any stepsize $\eta \in (0,\eta_0)$
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
\left\| x ^ {t} - x ^ {*} \right\| ^ {2} + \left\| y ^ {t} - y ^ {*} \right\| ^ {2} \leq \rho (\eta) ^ {t} \left(\left\| x ^ {0} - x ^ {*} \right\| ^ {2} + \left\| y ^ {0} - y ^ {*} \right\| ^ {2}\right), \forall t = 0, 1, \dots ,
|
| 219 |
+
$$
|
| 220 |
+
|
| 221 |
+
where $(x^{*},y^{*})$ is the unique minimax point of $f(x,y)$ and $\rho (\eta)$ is some scalar in $(0,1)$ .
|
| 222 |
+
|
| 223 |
+
Theorem 1 guarantees linear convergence of FedGDA-GT (Algorithm 2) to the optimal solution of (1) under suitable choices of the stepsize $\eta$. We note that linear convergence here is with respect to the outer loop (indexed by $t$), as the inner loop (indexed by $k$) can be implemented cheaply without any communication. The fast and exact convergence in this result eliminates the tradeoff between model accuracy and communication efficiency. Furthermore, no restriction is placed on the heterogeneity level of the local objectives. For the homogeneous setting, we show in Appendix D.4 that the convergence rate of FedGDA-GT improves by at least a factor of $K$ compared with the heterogeneous setting.
|
| 224 |
+
|
| 225 |
+
# 4 Generalization bounds on minimax learning problems
|
| 226 |
+
|
| 227 |
+
In this section, we consider minimax statistical learning, an important application of the minimax framework. We view problem (1) in Section 3 as the empirical version of a population minimax optimization problem and evaluate the generalization performance of the empirical problem using Rademacher complexity.
|
| 228 |
+
|
| 229 |
+
To be more specific, in a minimax statistical learning task, each agent is allocated with some local dataset $S_{i} = \{\xi_{i,j}\}_{j=1}^{n_{i}}$ , where $\xi_{i,j}$ denotes the $j$ th sample of agent $i$ and $n_{i}$ denotes the number of local samples. $S = \bigcup_{i=1}^{m} S_{i}$ is the dataset with all samples. Moreover, we assume $n_{i} = n, \forall i = 1, \ldots, m$ and $N = mn$ is the total number of samples. Then, each local objective function $f_{i}(x,y)$ is defined by using local data, i.e.,
|
| 230 |
+
|
| 231 |
+
$$
|
| 232 |
+
f _ {i} (x, y) = \frac {1}{n} \sum_ {j = 1} ^ {n} l (x, y; \xi_ {i, j}), \tag {4}
|
| 233 |
+
$$
|
| 234 |
+
|
| 235 |
+
where $l(x,y;\xi_{i,j})$ is the loss function measured at point $\xi_{i,j}$ and (4) is called local empirical minimax risk. Suppose that for agent $i$ each data sample $\xi_{i,j}$ is independently drawn from some underlying distribution $P_i$ , denoted by $\xi \sim P_i$ . We can further define the local population minimax risk as follows:
|
| 236 |
+
|
| 237 |
+
$$
|
| 238 |
+
R _ {i} (x, y) = \mathbb {E} _ {\xi \sim P _ {i}} [ l (x, y; \xi) ] \tag {5}
|
| 239 |
+
$$
|
| 240 |
+
|
| 241 |
+
and similarly, the global population minimax risk is defined by
|
| 242 |
+
|
| 243 |
+
$$
|
| 244 |
+
R (x, y) = \mathbb {E} _ {\xi \sim P} [ l (x, y; \xi) ], \tag {6}
|
| 245 |
+
$$
|
| 246 |
+
|
| 247 |
+
where $P$ is the underlying distribution of the whole dataset $S$ . Then, the minimax problem based on the population minimax risk is given by
|
| 248 |
+
|
| 249 |
+
$$
|
| 250 |
+
\min _ {x \in \mathcal {X}} \max _ {y \in \mathcal {Y}} R (x, y). \tag {7}
|
| 251 |
+
$$
|
| 252 |
+
|
| 253 |
+
Sample applications of our formulation (4)-(7) can be found in Appendix A.
|
| 254 |
+
|
| 255 |
+
While we are interested in the population minimax risk, in practice we can only solve (1), which is a sampled version of (7), since the true distribution $P$ is unknown. In this sense, how well can we expect the optimal solution of (1) to perform with respect to (6)? To answer this question, we provide a generalization bound that measures the performance of the model trained on the empirical minimax risk $f(x,y)$. Our generalization bound is based on the notion of Rademacher complexity [29], defined by
|
| 256 |
+
|
| 257 |
+
$$
|
| 258 |
+
\mathcal {R} (\mathcal {X}, y) = \mathbb {E} _ {\xi \sim P} \mathbb {E} _ {\sigma} \left[ \sup _ {x \in \mathcal {X}} \frac {1}{m n} \sum_ {i = 1} ^ {m} \sum_ {j = 1} ^ {n} \sigma_ {i, j} l (x, y; \xi_ {i, j}) \right], \tag {8}
|
| 259 |
+
$$
|
| 260 |
+
|
| 261 |
+
where $\sigma = \{\sigma_{i,j}\}$, $\forall i = 1, \dots, m$, $\forall j = 1, \dots, n$, is a collection of Rademacher variables taking values in $\{-1, 1\}$ uniformly. The Rademacher complexity (8) captures the capability of $\mathcal{X}$ to fit random sign noise $\sigma$: the term $\sigma_{i,j} l(x,y; \xi_{i,j})$ measures how well the loss $l$ correlates with the noise $\sigma$ on the sample set $S$, and taking the supremum measures how well the feasible set $\mathcal{X}$ can, on average, correlate with this random noise. Thus, the richer $\mathcal{X}$ is, the larger the Rademacher complexity. Moreover, the minimax Rademacher complexity is defined over $\mathcal{Y}$ by
|
| 262 |
+
|
| 263 |
+
$$
|
| 264 |
+
\mathcal{R} (\mathcal{X}, \mathcal{Y}) = \max_{y \in \mathcal{Y}} \mathcal{R} (\mathcal{X}, y). \tag {9}
|
| 265 |
+
$$
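For intuition, (8) can be approximated empirically by Monte Carlo over the Rademacher signs once the supremum over $\mathcal{X}$ is replaced by a maximum over a finite candidate set; the estimator below is only a rough illustration of the definition, and the loss function, candidate set, and all names are hypothetical.

```python
import numpy as np

def empirical_rademacher(loss, X_candidates, y, samples, n_draws=200, seed=0):
    """Monte Carlo estimate of the empirical Rademacher complexity (8) at a fixed y.

    loss(x, y, xi) -> scalar; X_candidates: finite set standing in for the feasible set X;
    samples: array of shape (m, n, ...) holding the data points xi_{i,j}.
    """
    rng = np.random.default_rng(seed)
    m, n = samples.shape[:2]
    # Precompute the loss of every candidate x on every sample.
    L = np.array([[[loss(x, y, samples[i, j]) for j in range(n)] for i in range(m)]
                  for x in X_candidates])                     # shape (|X|, m, n)
    vals = []
    for _ in range(n_draws):
        sigma = rng.choice([-1.0, 1.0], size=(m, n))          # Rademacher signs
        corr = (L * sigma).sum(axis=(1, 2)) / (m * n)         # correlation per candidate x
        vals.append(corr.max())                               # sup over the candidate set
    return float(np.mean(vals))
```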
|
| 266 |
+
|
| 267 |
+
Given $\epsilon > 0$, we further define the minimum $\epsilon$-cover of $\mathcal{Y}$ in $l_{2}$ distance by
|
| 268 |
+
|
| 269 |
+
$$
|
| 270 |
+
\mathcal{Y}_{\epsilon} = \arg \min_{C(\mathcal{Y},\epsilon)}\bigl|C(\mathcal{Y},\epsilon)\bigr|,
|
| 271 |
+
$$
|
| 272 |
+
|
| 273 |
+
where $C(\mathcal{Y},\epsilon) = \{B_{\epsilon}(y):y\in \mathcal{Y}\}$ is a collection of open balls $B_{\epsilon}(y)$ centered at $y$ with radius $\epsilon$ such that for any $y\in \mathcal{Y}$ there exists some $B_{\epsilon}(y^{\prime})\in C(\mathcal{Y},\epsilon)$ with $y\in B_{\epsilon}(y^{\prime})$. Note that $|\mathcal{Y}_{\epsilon}| < \infty$ since $\mathcal{Y}$ is compact and thus every open cover of $\mathcal{Y}$ has a finite subcover.
|
| 274 |
+
|
| 275 |
+
Then, we have the following generalization bound for the distributed minimax learning problem:
|
| 276 |
+
|
| 277 |
+
Theorem 2. Suppose $|l(x,y;\xi) - l(x,y';\xi)| \leq L_y\| y - y'\|$ and $|l(x,y;\xi)| \leq M_i(y), \forall x \in \mathcal{X}, \forall y,y' \in \mathcal{Y}_\epsilon$ and $\forall \xi \sim P_i, i = 1,\ldots,m$ with some positive scalar $L_{y}$ and real-valued function $M_{i}(y) > 0$ . Then, given any $\epsilon > 0$ and $\delta > 0$ , with probability at least $1 - \delta$ for any $(x,y) \in \mathcal{X} \times \mathcal{Y}$ ,
|
| 278 |
+
|
| 279 |
+
$$
|
| 280 |
+
R (x, y) \leq f (x, y) + 2 \mathcal {R} (\mathcal {X}, y) + \sqrt {\sum_ {i = 1} ^ {m} \frac {M _ {i} ^ {2} (y)}{2 m ^ {2} n} \log \frac {| \mathcal {Y} _ {\epsilon} |}{\delta}} + 2 L _ {y} \epsilon . \tag {10}
|
| 281 |
+
$$
|
| 282 |
+
|
| 283 |
+
Theorem 2 states that, with high probability, for any feasible $(x, y)$ the gap between the global population minimax risk $R(x, y)$ and the global empirical minimax risk $f(x, y)$ is bounded by the Rademacher complexity plus a term depending on the number of agents and the local sample size. Moreover, we allow the upper bound on $l(\cdot, y)$ to depend on the choice of $y$ and on the agent, since different local distributions generally affect the value of $l(\cdot, y)$ differently.
|
| 284 |
+
|
| 285 |
+
Based on Theorem 2, we further derive the high-probability bound on $\max_{y\in \mathcal{Y}}R(x,y)$
|
| 286 |
+
|
| 287 |
+
Corollary 1. Under the same conditions as Theorem 2, for any $\epsilon > 0$ and $\delta > 0$, with probability at least $1 - \delta$ the following inequality holds for any $x\in \mathcal{X}$:
|
| 288 |
+
|
| 289 |
+
$$
|
| 290 |
+
Q (x) \leq g (x) + 2 \mathcal {R} (\mathcal {X}, \mathcal {Y}) + \sqrt {\max _ {y \in \mathcal {Y}} \left\{\sum_ {i = 1} ^ {m} \frac {M _ {i} ^ {2} (y)}{2 m ^ {2} n} \right\} \log \frac {| \mathcal {Y} _ {\epsilon} |}{\delta}} + 2 L _ {y} \epsilon \tag {11}
|
| 291 |
+
$$
|
| 292 |
+
|
| 293 |
+
where $Q(x) = \max_{y\in \mathcal{Y}}R(x,y)$ , $g(x) = \max_{y\in \mathcal{Y}}f(x,y)$ are the worst-case population and empirical risks, respectively.
|
| 294 |
+
|
| 295 |
+
We can further bound $\mathcal{R}(\mathcal{X},\mathcal{Y})$ when the loss function $l(x,y;\xi)$ takes finite number of values for any $(x,y)\in \mathcal{X}\times \mathcal{Y}$ and any data samples.
|
| 296 |
+
|
| 297 |
+
Lemma 3. Suppose for any $y \in \mathcal{Y}$ and $i \in \{1, \dots, m\}$ , $|l(\cdot, y; \cdot)|$ is bounded by $M_i(y)$ and takes finite number of values. Further, assume that the VC-dimension of $\mathcal{X}$ is $d$ . Then, the following inequality holds:
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\mathcal {R} (\mathcal {X}, \mathcal {Y}) \leq \sqrt {2 d \max _ {y \in \mathcal {Y}} \left\{\sum_ {i = 1} ^ {m} \frac {M _ {i} ^ {2} (y)}{m ^ {2} n} \right\} \left(1 + \log \frac {m n}{d}\right)}. \tag {12}
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
Corollary 1 bounds the error between the worst-case population and empirical risks, that is, $\max_{y\in \mathcal{Y}}R(x,y) - \max_{y\in \mathcal{Y}}f(x,y)$. A smaller error indicates that the empirically trained model generalizes better to the underlying distribution $P$. Combining this with Lemma 3, we see that this worst-case error is bounded by a decreasing function of the sample size. Thus, one effective way to obtain better generalization is for each agent to draw more local samples.
|
| 304 |
+
|
| 305 |
+
It is worth noting that the generalization bound depends on the term $\sum_{i=1}^{m} M_i^2(y)$, as shown in (10)-(12), which essentially measures the effect of the feasible set $\mathcal{Y}$ through the different local data distributions of the agents. This also captures the heterogeneity of the agents.
|
| 306 |
+
|
| 307 |
+
In fact, the bounds (10)-(12) we proposed are generalized versions of those in [13], which essentially include them as special cases by selecting suitable $M_{i}(y)$ . Specifically, noting that in [13], the global population risk takes the form of $R(x,y) = \sum_{i=1}^{m} y_{i} R_{i}(x)$ with $y_{i} \geq 0$ and $\sum_{i=1}^{m} y_{i} = 1$ , then by choosing $M_{i}(y) = my_{i}M$ with some $M > 0$ we recover the same result. Moreover, compared to [30, 31], where only centralized minimax learning problems are considered, our bounds have the same order of complexity $\mathcal{O}(1/\sqrt{N})$ with $N$ denoting the total sample size by taking the uniform bound on $l(\cdot)$ for all agents.
|
| 308 |
+
|
| 309 |
+
# 5 Experiments
|
| 310 |
+
|
| 311 |
+
In this section, we numerically compare the performance of FedGDA-GT with Local SGDA using full gradients, solving (1) on a personal laptop. We first perform experiments on quadratic objective functions with $x$ and $y$ uncoupled. Then, we test our algorithm on a robust linear regression problem. In both cases, FedGDA-GT performs much better than Local SGDA with heterogeneous local objectives.
|
| 312 |
+
|
| 313 |
+
# 5.1 Uncoupled quadratic objective functions
|
| 314 |
+
|
| 315 |
+
We first consider the following local objective functions:
|
| 316 |
+
|
| 317 |
+
$$
|
| 318 |
+
f _ {i} (x, y) = \frac {1}{2} x ^ {T} A _ {i} ^ {T} A _ {i} x - \frac {1}{2} y ^ {T} A _ {i} ^ {T} A _ {i} y + \left(A _ {i} ^ {T} b _ {i}\right) ^ {T} (2 x - y), \forall i = 1, \dots , m, \tag {13}
|
| 319 |
+
$$
|
| 320 |
+
|
| 321 |
+
where $x, y \in \mathbb{R}^d$ and $A_i \in \mathbb{R}^{n_i \times d}$ with $n_i$ representing the number of samples of agent $i$ . We generate $A_i, b_i$ as follows:
|
| 322 |
+
|
| 323 |
+
For each agent, every entry of $A_{i}$, denoted by $[A_i]_{kl}$, is drawn from the Gaussian distribution $\mathcal{N}(0,(0.5i)^{-2})$. To construct $b_{i}$, we generate a random reference point $\theta_{i} \in \mathbb{R}^{d}$ with $\theta_{i} \sim \mathcal{N}(\mu_{i},I_{d\times d})$, where each element of $\mu_{i}$ is drawn from $\mathcal{N}(\alpha ,1)$ and $\alpha \sim \mathcal{N}(0,100)$. Then $b_{i} = A_{i}\theta_{i} + \epsilon_{i}$ with $\epsilon_{i} \sim \mathcal{N}(0,0.25I_{n_{i}\times n_{i}})$. We set the model dimension to $d = 50$ and the local sample size to $n_i = 500$, and train with $m = 20$ agents using Algorithm 1 and Algorithm 2, respectively. To compare them, we use the learning rate $10^{-4}$ for both algorithms and take Local SGDA with $K = 1$, which is equivalent to centralized GDA, as the baseline. Figure 1 shows the trajectories of Algorithms 1 and 2 on objective functions constructed by (13) for different numbers of local updates ($K = 20$ and $K = 50$). In this heterogeneous setting, FedGDA-GT achieves linear convergence and converges to a more accurate solution with significantly fewer rounds of communication than Local SGDA and centralized GDA. Moreover, our numerical results suggest that Local SGDA may converge to a non-optimal point (optimality gap over $10^{4}$), which conforms with Proposition 1.
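A sketch of this synthetic setup follows (seeds are arbitrary, we read the second argument of $\mathcal{N}(\cdot,\cdot)$ as a variance, and the gradient formulas are derived directly from (13)):

```python
import numpy as np

def make_quadratic_data(m=20, n_i=500, d=50, seed=0):
    """Synthetic data for the uncoupled quadratic objectives (13); seeds are our choice."""
    rng = np.random.default_rng(seed)
    As, bs = [], []
    for i in range(1, m + 1):
        A = rng.normal(0.0, 1.0 / (0.5 * i), size=(n_i, d))   # entries ~ N(0, (0.5 i)^{-2})
        alpha = rng.normal(0.0, 10.0)                          # alpha ~ N(0, 100), i.e. sd 10
        mu = rng.normal(alpha, 1.0, size=d)                    # elements of mu_i ~ N(alpha, 1)
        theta = rng.normal(mu, 1.0)                            # theta_i ~ N(mu_i, I)
        eps = rng.normal(0.0, 0.5, size=n_i)                   # eps_i ~ N(0, 0.25 I), sd 0.5
        As.append(A)
        bs.append(A @ theta + eps)
    return As, bs

def grad_quadratic(A, b, x, y):
    """Gradients of (13): grad_x = A^T A x + 2 A^T b, grad_y = -(A^T A y + A^T b)."""
    Atb = A.T @ b
    return A.T @ (A @ x) + 2 * Atb, -(A.T @ (A @ y) + Atb)
```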
|
| 324 |
+
|
| 325 |
+

|
| 326 |
+
(a) $K = 20$
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
(b) $K = 50$
|
| 330 |
+
Figure 1: Local SGDA and FedGDA-GT with constant stepsizes under different numbers of local updates
|
| 331 |
+
|
| 332 |
+
# 5.2 Robust linear regression
|
| 333 |
+
|
| 334 |
+
Next, we consider the problem of robust linear regression, which is widely studied in estimation with gross error [28, 57]. Following the formulation in [25], each agent's loss function is defined by
|
| 335 |
+
|
| 336 |
+
$$
|
| 337 |
+
f _ {i} (x, y) = \frac {1}{n _ {i}} \sum_ {j = 1} ^ {n _ {i}} \left(x ^ {T} \left(a _ {i, j} + y\right) - b _ {i, j}\right) ^ {2} + \frac {1}{2} \| x \| ^ {2}, \forall i = 1, \dots , m, \tag {14}
|
| 338 |
+
$$
|
| 339 |
+
|
| 340 |
+
where $(a_{i,j},b_{i,j})$ is the $j$th data sample of agent $i$ and $n_i$ is the local sample size. In (14), $x\in \mathbb{R}^d$ represents the linear regression model and $y\in \mathbb{R}^d$ represents the gross noise that contaminates each sample. We assume the noise is bounded, i.e., $\| y\| \leq 1$. By solving $\min_{x\in \mathbb{R}^d}\max_{\| y\| \leq 1}\frac{1}{m}\sum_{i = 1}^{m}f_i(x,y)$, we obtain a global model for the linear regression problem that is robust even under the worst contamination by gross noise. To measure the convergence of the algorithms, we use the robust loss: given a model $\hat{x}$, the corresponding robust loss [25, 26] is defined by $\tilde{f}(\hat{x}) = \max_{\| y\| \leq 1}\sum_{i = 1}^{m}f_{i}(\hat{x},y)$.
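A small sketch for approximating the robust loss $\tilde{f}(\hat{x})$ by projected gradient ascent over the unit ball; note that the inner problem in (14) is not concave in $y$, so this heuristic only gives an approximate (lower-bound) value, and the step size and iteration count are arbitrary choices of ours.

```python
import numpy as np

def robust_loss(x_hat, data, steps=500, lr=0.05):
    """Approximate tilde_f(x_hat) = max_{||y|| <= 1} sum_i f_i(x_hat, y) for the losses in (14).

    data: list of (A_i, b_i) pairs with A_i of shape (n_i, d) and b_i of shape (n_i,).
    """
    d = x_hat.shape[0]
    y = np.zeros(d)
    for _ in range(steps):
        grad = np.zeros(d)
        for A, b in data:
            r = (A + y) @ x_hat - b                     # residuals x^T(a_{ij} + y) - b_{ij}
            grad += (2.0 / len(b)) * r.sum() * x_hat    # grad_y of (1/n_i) sum_j r_j^2
        y += lr * grad                                  # gradient ascent step on y
        nrm = np.linalg.norm(y)
        if nrm > 1.0:                                   # project back onto the unit ball
            y /= nrm
    total = 0.0
    for A, b in data:
        r = (A + y) @ x_hat - b
        total += np.mean(r ** 2) + 0.5 * np.dot(x_hat, x_hat)
    return total
```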
|
| 341 |
+
|
| 342 |
+
We generate local models and data as follows: the local model $x_{i}^{*}$ is generated from a multivariate normal distribution. The output for agent $i$ is given by $b_{i,j} = (x_i^*)^T a_{i,j} + \epsilon_j$ with $\epsilon_{j}\sim \mathcal{N}(0,1)$. Each input point $a_{i,j}$ has dimension $d$ and is drawn from a Gaussian distribution $a_{i,j}\sim \mathcal{N}(\mu_i,K_i)$, where $\mu_{i}\sim \mathcal{N}(c_{i},I_{d\times d})$ and $K_{i} = i^{-1.3}I_{d\times d}$. Each element of $c_{i}$ is drawn from $\mathcal{N}(0,\alpha^2)$. By choosing different $\alpha$, we control the heterogeneity of the local data and hence of $f_{i}(x,y)$.
|
| 343 |
+
|
| 344 |
+
In this experiment, we compare Algorithms 1 and 2 under different heterogeneity levels, i.e., $\alpha = 1$, $\alpha = 5$ and $\alpha = 20$. For each case, we choose the same constant $\eta$ for both Local SGDA and FedGDA-GT. As shown in Figure 2, when the local agents are more heterogeneous, FedGDA-GT outperforms Local SGDA both in faster convergence and in a smaller robust loss. When $\alpha = 1$, the two algorithms have almost the same performance. To explain this phenomenon, recall that a smaller $\alpha$ essentially means more similar local objectives; in particular, $\alpha = 0$ corresponds to the i.i.d. case. The gradient correction terms then nearly vanish, so the local updates of FedGDA-GT become almost the same as those of Local SGDA, which explains the similar performance in Figure 2(a).
|
| 345 |
+
|
| 346 |
+

|
| 347 |
+
(a) $\alpha = 1$
|
| 348 |
+
Figure 2: Local SGDA and FedGDA-GT under different heterogeneity levels
|
| 349 |
+
|
| 350 |
+

|
| 351 |
+
(b) $\alpha = 5$
|
| 352 |
+
|
| 353 |
+

|
| 354 |
+
(c) $\alpha = 20$
|
| 355 |
+
|
| 356 |
+
# 6 Conclusion
|
| 357 |
+
|
| 358 |
+
In this paper, we investigate the federated minimax learning problem. We first characterize the fixed-point behavior of a recent algorithm, Local SGDA, to show that it presents a tradeoff between communication efficiency and model accuracy and cannot achieve linear convergence under constant learning rates. To resolve this issue, we propose FedGDA-GT, which guarantees exact linear convergence and reaches $\epsilon$ -optimality in $\mathcal{O}(\log(1/\epsilon))$ time, matching the centralized GDA method. Then, we study the generalization properties of distributed minimax learning problems. We establish generalization error bounds based on Rademacher complexity, without strong assumptions on local distributions and loss functions; the bounds match existing results for centralized minimax learning problems. Finally, we compare FedGDA-GT with two state-of-the-art algorithms, Local SGDA and GDA, through numerical experiments and show that FedGDA-GT outperforms them in efficiency and/or accuracy.
|
| 359 |
+
|
| 360 |
+
# Acknowledgments and Disclosure of Funding
|
| 361 |
+
|
| 362 |
+
This work was supported by the NSF NRI 2024774.
|
| 363 |
+
|
| 364 |
+
# References
|
| 365 |
+
|
| 366 |
+
[1] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in Neural Information Processing Systems, 27, 2014.
|
| 367 |
+
[2] Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of Wasserstein GANs. Advances in Neural Information Processing Systems, 30, 2017.
|
| 368 |
+
[3] Xudong Mao, Qing Li, Haoran Xie, Raymond Y.K. Lau, Zhen Wang, and Stephen Paul Smolley. Least squares generative adversarial networks. Proceedings of the IEEE International Conference on Computer Vision, pp. 2794-2802, 2017.
|
| 369 |
+
[4] Shayegan Omidshafiei, Jason Pazis, Christopher Amato, Jonathan P How, and John Vian. Deep decentralized multi-task multi-agent reinforcement learning under partial observability. arXiv preprint arXiv:1703.06182, 2017.
|
| 370 |
+
[5] Aman Sinha, Hongseok Namkoong, and John Duchi. Certifiable distributional robustness with principled adversarial training. International Conference on Learning Representations, 2017.
|
| 371 |
+
[6] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. International Conference on Learning Representations, 2018.
|
| 372 |
+
[7] Nam H. Nguyen and Trac D. Tran. Robust lasso with missing and grossly corrupted observations. IEEE Transactions on Information Theory, 59(4): 2036-2058, 2013.
|
| 373 |
+
[8] Hongseok Namkoong and John C. Duchi. Stochastic gradient methods for distributionally robust optimization with f-divergences. Advances in Neural Information Processing Systems, 29, 2016.
|
| 374 |
+
[9] Hongseok Namkoong and John C. Duchi. Variance-based regularization with convex objectives. Advances in Neural Information Processing Systems, 30, 2017.
|
| 375 |
+
[10] Hongseok Namkoong and John C. Duchi. Learning models with uniform performance via distributionally robust optimization. arXiv preprint arXiv:1810.08750, 2018.
|
| 376 |
+
[11] Shiori Sagawa, Pang Wei Koh, Tatsunori B. Hashimoto, and Percy Liang. Distributionally robust neural networks for group shifts: On the importance of regularization for worst-case generalization. International Conference on Learning Representations, 2020.
|
| 377 |
+
[12] Han Zhao, Shanghang Zhang, Guanhang Wu, José M. F. Moura, Joao P. Costeira, and Geoffrey J. Gordon. Adversarial multiple source domain adaptation. Advances in Neural Information Processing Systems, 31, 2018.
|
| 378 |
+
[13] Mehryar Mohri, Gary Sivek, and Ananda Theertha Suresh. Agnostic federated learning. International Conference on Machine Learning, pp. 4615-4625. PMLR, 2019.
|
| 379 |
+
|
| 380 |
+
[14] Angela Nedic and Asuman Ozdaglar. Subgradient methods for saddle-point problems. Journal of Optimization Theory and Applications, pp. 205-228, 2009.
|
| 381 |
+
[15] Tianyi Lin, Chi Jin, and Michael Jordan. On gradient descent ascent for nonconvex-concave minimax problems. International Conference on Machine Learning, 119:6083-6093. PMLR, 2020.
|
| 382 |
+
[16] Ohad Shamir, Nathan Srebro, and Tong Zhang. Communication efficient distributed optimization using an approximate Newton-type method. International Conference on Machine Learning, 32(2):1000-1008. PMLR, 2014.
|
| 383 |
+
[17] Jialei Wang, Mladen Kolar, Nathan Srebro, and Tong Zhang. Efficient distributed learning with sparsity. International Conference on Machine Learning, 70:3636-3645. PMLR, 2017.
|
| 384 |
+
[18] Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Aguera y Arcas. Communication-efficient learning of deep networks from decentralized data. International Conference on Artificial Intelligence and Statistics, 54:1273-1282. PMLR, 2017.
|
| 385 |
+
[19] Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. International Conference on Machine Learning, 119:5132-5143. PMLR, 2020.
|
| 386 |
+
[20] Jianyu Wang, Qinghua Liu, Hao Liang, Gauri Joshi, and H. Vincent Poor. Tackling the objective inconsistency problem in heterogeneous federated optimization. Advances in Neural Information Processing Systems, 33, 2020.
|
| 387 |
+
[21] Reese Pathak and Martin J. Wainwright. FedSplit: an algorithmic framework for fast federated optimization. Advances in Neural Information Processing Systems, 33, 2020.
|
| 388 |
+
[22] Aritra Mitra, Rayana Jaafar, George J. Pappas, and Hamed Hassani. Linear convergence in federated learning: tackling client heterogeneity and sparse gradients. Advances in Neural Information Processing Systems, 34, 2021.
|
| 389 |
+
[23] Zilong Zhao, Robert Birke, Aditya Kunar, and Lydia Y. Chen. Fed-TGAN: Federated learning framework for synthesizing tabular data. arXiv preprint arXiv:2108.07927, 2021.
|
| 390 |
+
[24] Vaikkunth Mugunthan, Vignesh Gokul, Lalana Kagal, and Shlomo Dubnov. Bias-free FedGAN: A federated approach to generate bias-free datasets. arXiv preprint arXiv:2108.07927, 2021.
|
| 391 |
+
[25] Yuyang Deng and Mehrdad Mahdavi. Local stochastic gradient descent ascent: convergence analysis and communication efficiency. International Conference on Artificial Intelligence and Statistics, 130:1387-1395. PMLR, 2021.
|
| 392 |
+
[26] Pranay Sharma, Rohan Panda, Gauri Joshi, and Pramod K. Varshney. Federated minimax optimization: Improved convergence analyses and algorithms. arXiv preprint arXiv:2203.04850, 2022.
|
| 393 |
+
[27] Po-Ling Loh and Martin J. Wainwright. High-dimensional regression with noisy and missing data: Provable guarantees with non-convexity. Advances in Neural Information Processing Systems, 24, 2011.
|
| 394 |
+
[28] Nasser Nasrabadi, Trac Tran, and Nam Nguyen. Robust Lasso with missing and grossly corrupted observations. Advances in Neural Information Processing Systems, 24, 2011.
|
| 395 |
+
[29] Mehryar Mohri, Afshin Rostamizadeh, and Ameet Talwalkar. Foundations of Machine Learning. MIT Press, second edition, 2018.
|
| 396 |
+
[30] Farzan Farnia and David Tse. A minimax approach to supervised learning. Advances in Neural Information Processing Systems, 29, 2016.
|
| 397 |
+
[31] Jaeho Lee and Maxim Raginsky. Minimax statistical learning with Wasserstein distances. Advances in Neural Information Processing Systems, 31, 2018.
|
| 398 |
+
[32] Farzan Farnia and Asuman Ozdaglar. Train simultaneously, generalize better: Stability of gradient-based minimax learners. International Conference on Machine Learning, 139:3174-3185. PMLR, 2021.
|
| 399 |
+
[33] John von Neumann. Zur theorie der gesellschaftsspiele. Mathematische Annalen, 100(1): 295-320, 1928.
|
| 400 |
+
[34] Julia Robinson. An iterative method of solving a game. Annals of Mathematics, pp. 296-301, 1951.
|
| 401 |
+
|
| 402 |
+
[35] Maurice Sion. On general minimax theorems. Pacific Journal of Mathematics, 8(1):171-176, 1958.
|
| 403 |
+
[36] G. M. Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 12:747-756, 1976.
|
| 404 |
+
[37] Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2-3):319-344, 2007.
|
| 405 |
+
[38] Aryan Mokhtari, Asuman Ozdaglar, and Sarath Pattathil. A unified analysis of extra-gradient and optimistic gradient methods for saddle point problems: proximal point approach. International Conference on Artificial Intelligence and Statistics, 108:1497-1507. PMLR, 2020.
|
| 406 |
+
[39] Gauthier Gidel, Hugo Berard, Gaetan Vignoud, Pascal Vincent, and Simon Lacoste-Julien. A variational inequality perspective on generative adversarial networks. arXiv preprint arXiv:1802.10551, 2018.
|
| 407 |
+
[40] Mingrui Liu, Youssef Mroueh, Jerret Ross, Wei Zhang, Xiaodong Cui, Payel Das, and Tianbao Yang. Towards better understanding of adaptive gradient algorithms in generative adversarial nets. arXiv preprint arXiv:1912.11940, 2019.
|
| 408 |
+
[41] Maher Nouiehed, Maziar Sanjabi, Tianjian Huang, Jason D. Lee, and Meisam Razaviyayn. Solving a class of non-convex min-max games using iterative first order methods. Advances in Neural Information Processing Systems, 32, 2019.
|
| 409 |
+
[42] Mingrui Liu, Youssef Mroueh, Wei Zhang, Xiaodong Cui, Tianbao Yang, and Payel Das. Decentralized parallel algorithm for training generative adversarial nets. Advances in Neural Information Processing Systems, 33, 2020.
|
| 410 |
+
[43] Jelena Diakonikolas, Constantinos Daskalakis, and Michael I. Jordan. Efficient methods for structured nonconvex-nonconcave min-max optimization. International Conference on Artificial Intelligence and Statistics, 130:2746-2754. PMLR, 2021.
|
| 411 |
+
[44] David Mateos-Núñez and Jorge Cortés. Distributed subgradient methods for saddle-point problems. IEEE Conference on Decision and Control, pp. 5462-5467, 2015.
|
| 412 |
+
[45] Aleksandr Beznosikov, Valentin Samokhin, and Alexander Gasnikov. Distributed saddle-point problems: Lower bounds, near-optimal and robust algorithms. arXiv preprint arXiv:2010.13112, 2020.
|
| 413 |
+
[46] Wenhan Xian, Feihu Huang, Yanfu Zhang, and Heng Huang. A faster decentralized algorithm for nonconvex minimax problems. Advances in Neural Information Processing Systems, 34, 2021.
|
| 414 |
+
[47] Alexander Rogozin, Aleksandr Beznosikov, Darina Dvinskikh, Dmitry Kovalev, Pavel Dvurechensky, and Alexander Gasnikov. Decentralized distributed optimization for saddle point problems. arXiv preprint arXiv:2102.07758, 2021.
|
| 415 |
+
[48] Yuyang Deng, Mohammad Mahdi Kamani, and Mehrdad Mahdavi. Distributionally robust federated averaging. Advances in Neural Information Processing Systems, 33, 2020.
|
| 416 |
+
[49] Amirhossein Reisizadeh, Farzan Farnia, Ramtin Pedarsani, and Ali Jadbabaie. Robust federated learning: The case of affine distribution shifts. Advances in Neural Information Processing Systems, 33, 2020.
|
| 417 |
+
[50] Mohammad Rasouli, Tao Sun, and Ram Rajagopal. FedGAN: Federated generative adversarial networks for distributed data. arXiv preprint arXiv:2006.07228, 2020.
|
| 418 |
+
[51] Yu Bai, Tengyu Ma, and Andrej Risteski. Approximability of discriminators implies diversity in GANs. arXiv preprint arXiv:1806.10586, 2018.
|
| 419 |
+
[52] Pengchuan Zhang, Qiang Liu, Dengyong Zhou, Tao Xu, and Xiaodong He. On the discrimination-generalization tradeoff in GANs. arXiv preprint arXiv:1711.02771, 2017.
|
| 420 |
+
[53] Sanjeev Arora, Rong Ge, Yingyu Liang, Tengyu Ma, and Yi Zhang. Generalization and equilibrium in generative adversarial nets (GANs). International Conference on Machine Learning, 70:224-232. PMLR, 2017.
|
| 421 |
+
[54] Dong Yin, Ramchandran Kannan, and Peter Bartlett. Rademacher complexity for adversarially robust generalization. International Conference on Machine Learning, 97:7085-7094. PMLR, 2019.
|
| 422 |
+
|
| 423 |
+
[55] Justin Khim and Po-Ling Loh. Adversarial risk bounds via function transformation. arXiv preprint arXiv:1810.09519, 2018.
|
| 424 |
+
[56] Colin Wei and Tengyu Ma. Improved sample complexities for deep networks and robust classification via an all-layer margin. arXiv preprint arXiv:1910.04284, 2019.
|
| 425 |
+
[57] Idan Attias, Aryeh Kontorovich, and Yishay Mansour. Improved generalization bounds for robust learning. International Conference on Algorithmic Learning Theory, 98:162-183. PMLR, 2019.
|
| 426 |
+
[58] Junyu Zhang, Mingyi Hong, Mengdi Wang, and Shuzhong Zhang. Generalization bounds for stochastic saddle point problems. International Conference on Artificial Intelligence and Statistics, 130:568-576. PMLR, 2021.
|
| 427 |
+
[59] Chi Jin, Praneeth Netrapalli, and Michael I. Jordan. What is local optimality in nonconvex-nonconcave minimax optimization? arXiv preprint arXiv:1902.00618, 2019.
|
| 428 |
+
|
| 429 |
+
# Checklist
|
| 430 |
+
|
| 431 |
+
1. For all authors...
|
| 432 |
+
|
| 433 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 434 |
+
(b) Did you describe the limitations of your work? [Yes]
|
| 435 |
+
(c) Did you discuss any potential negative societal impacts of your work? [N/A]
|
| 436 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 437 |
+
|
| 438 |
+
2. If you are including theoretical results...
|
| 439 |
+
|
| 440 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes]
|
| 441 |
+
(b) Did you include complete proofs of all theoretical results? [Yes]
|
| 442 |
+
|
| 443 |
+
3. If you ran experiments...
|
| 444 |
+
|
| 445 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
|
| 446 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
|
| 447 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [N/A]
|
| 448 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes]
|
| 449 |
+
|
| 450 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 451 |
+
|
| 452 |
+
(a) If your work uses existing assets, did you cite the creators? [N/A]
|
| 453 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 454 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
|
| 455 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 456 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 457 |
+
|
| 458 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 459 |
+
|
| 460 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 461 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 462 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5860555faa1808c71129ba04825be105d364fb77702d01db838459655ae3c359
|
| 3 |
+
size 255375
|
acommunicationefficientalgorithmwithlinearconvergenceforfederatedminimaxlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b4f454dcfce3187cc097df3ed959a692c2a2814416f0807f0dbf005978d0b9e8
|
| 3 |
+
size 608394
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:be33b119397b9511cd0741f58d471e1948b6fd1f5ee50e9fa1811ef7e486f59f
|
| 3 |
+
size 84940
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:89b8fd59a270121a3db666f4f898eec8dce8dfa0fd8041631757d52d2fd20f04
|
| 3 |
+
size 111815
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/58789780-5503-41ae-b7e1-b83721f07851_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9c7211b936a3694cf1e3159fd4049e219463a2685245901d798376fab0363ace
|
| 3 |
+
size 2613103
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/full.md
ADDED
|
@@ -0,0 +1,342 @@
| 1 |
+
# A Communication-Efficient Distributed Gradient Clipping Algorithm for Training Deep Neural Networks
|
| 2 |
+
|
| 3 |
+
Mingrui Liu $^{1*}$ , Zhenxun Zhuang $^{2}$ , Yunwen Lei $^{3}$ , Chunyang Liao $^{4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Department of Computer Science, George Mason University, Fairfax, VA 22030, USA
|
| 6 |
+
|
| 7 |
+
$^{2}$ Meta Platforms, Inc., Bellevue, WA, 98004, USA
|
| 8 |
+
|
| 9 |
+
$^{3}$ School of Computer Science, University of Birmingham, United Kingdom
|
| 10 |
+
|
| 11 |
+
<sup>4</sup> Department of Mathematics, Texas A&M University, College Station, Texas 77840, USA mingruil@gmu.edu, oldboymls@gmail.com, yunwen.lei@hotmail.com
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
In distributed training of deep neural networks, people usually run Stochastic Gradient Descent (SGD) or its variants on each machine and communicate with other machines periodically. However, SGD might converge slowly in training some deep neural networks (e.g., RNN, LSTM) because of the exploding gradient issue. Gradient clipping is usually employed to address this issue in the single machine setting, but exploring this technique in the distributed setting is still in its infancy: it remains mysterious whether the gradient clipping scheme can take advantage of multiple machines to enjoy parallel speedup. The main technical difficulty lies in dealing with nonconvex loss function, non-Lipschitz continuous gradient, and skipping communication rounds simultaneously. In this paper, we explore a relaxed-smoothness assumption of the loss landscape which LSTM was shown to satisfy in previous works, and design a communication-efficient gradient clipping algorithm. This algorithm can be run on multiple machines, where each machine employs a gradient clipping scheme and communicates with other machines after multiple steps of gradient-based updates. Our algorithm is proved to have $O\left(\frac{1}{N\epsilon^4}\right)$ iteration complexity and $O\left(\frac{1}{\epsilon^3}\right)$ communication complexity for finding an $\epsilon$ -stationary point in the homogeneous data setting, where $N$ is the number of machines. This indicates that our algorithm enjoys linear speedup and reduced communication rounds. Our proof relies on novel analysis techniques for estimating truncated random variables, which we believe are of independent interest. Our experiments on several benchmark datasets and various scenarios demonstrate that our algorithm indeed exhibits fast convergence speed in practice and thus validates our theory.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Deep learning has achieved tremendous successes in many domains, including computer vision [23], natural language processing [5], and games [39]. To obtain good empirical performance, people usually need to train large models on a huge amount of data, which is computationally expensive. To speed up the training process, distributed training becomes indispensable [4]. For example, Goyal et al. [12] trained a ResNet-50 on the ImageNet dataset by distributed SGD with minibatch size 8192 on 256 GPUs in only one hour, which not only matches the small minibatch
|
| 20 |
+
|
| 21 |
+
Table 1: Comparison of iteration and communication complexity of different algorithms for finding a point whose gradient magnitude is smaller than $\epsilon$ (i.e., an $\epsilon$ -stationary point as defined in Definition 3). $N$ is the number of machines; the meaning of the other constants can be found in Assumption 1. For the complexity of [9] in this table, we assume the gradient norm is upper bounded by $M$ , so that the gradient is $(L_0 + L_1 M)$ -Lipschitz. However, the original paper [9] does not require a bounded gradient; instead, it requires an $L$ -Lipschitz gradient and bounded variance $\sigma^2$ , under which its complexity result is $O\left(\Delta L \epsilon^{-2} + \Delta L \sigma^2 \epsilon^{-4}\right)$ .
|
| 22 |
+
|
| 23 |
+
| Algorithm | Setting | Iteration Complexity | Communication Complexity |
| --- | --- | --- | --- |
| SGD [9] | Single | $O\big(\Delta(L_0+L_1M)\epsilon^{-2}+\Delta(L_0+L_1M)\sigma^2\epsilon^{-4}\big)$ | N/A |
| Clipped SGD [51] | Single | $O\big((\Delta+(L_0+L_1\sigma)\sigma^2+\sigma L_0^2/L_1)^2\epsilon^{-4}\big)$ | N/A |
| Clipping Framework [50] | Single | $O\big(\Delta L_0\sigma^2\epsilon^{-4}\big)$ | N/A |
| Naive Parallel of [50] | Distributed | $O\big(\Delta L_0\sigma^2/(N\epsilon^4)\big)$ | $O\big(\Delta L_0\sigma^2/(N\epsilon^4)\big)$ |
| Ours (this work) | Distributed | $O\big(\Delta L_0\sigma^2/(N\epsilon^4)\big)$ | $O\big(\Delta L_0\sigma\epsilon^{-3}\big)$ |
|
| 24 |
+
|
| 25 |
+
accuracy but also enjoys parallel speedup, and hence improves the running time. Recently, local SGD [40, 47], a variant of distributed SGD, has attracted tremendous attention in the federated learning community [29]. The local SGD algorithm runs multiple steps of SGD on each machine before communicating with other clients.
|
| 26 |
+
|
| 27 |
+
Despite the empirical success of distributed SGD and its variants (e.g., local SGD) in deep learning, they may not exhibit good performance when training some neural networks (e.g., Recurrent Neural Networks, LSTMs), due to the exploding gradient problem [33, 34]. To address this issue, Pascanu et al. [34] proposed the gradient clipping strategy, which has become a standard technique when training language models [8, 35, 31]. Some recent works try to theoretically explain gradient clipping from the perspective of non-convex optimization [51, 50]. These works are built upon an important observation made in [51]: for certain neural networks such as LSTMs, the gradient does not vary uniformly over the loss landscape (i.e., the gradient is not Lipschitz continuous with a uniform constant), and the gradient Lipschitz constant can scale linearly with the gradient norm. This is referred to as the relaxed smoothness condition (i.e., $(L_0, L_1)$ -smoothness defined in Definition 2), which strictly generalizes the usual smoothness condition (i.e., $L$ -smoothness defined in Definition 1). Under the relaxed smoothness condition, Zhang et al. [51, 50] proved that gradient clipping enjoys polynomial-time iteration complexity for finding a first-order stationary point in the single machine setting, and that it can be arbitrarily faster than fixed-step gradient descent. In practice, both distributed learning and gradient clipping are important techniques for accelerating neural network training, yet the theoretical analysis of gradient clipping is restricted to the single machine setting [51, 50]. Hence it naturally motivates us to consider the following question:
|
| 28 |
+
|
| 29 |
+
Is it possible that the gradient clipping scheme can take advantage of multiple machines to enjoy parallel speedup in training deep neural networks, with only infrequent communication?
|
| 30 |
+
|
| 31 |
+
In this paper, we give an affirmative answer to the above question. Building upon the relaxed smoothness condition as in [51, 50], we design a communication-efficient distributed gradient clipping algorithm. The key characteristics of our algorithm are: (i) unlike the naive parallel gradient clipping algorithm, which requires averaging model weights and gradients from all machines at every iteration, our algorithm only aggregates weights with other machines after a certain number of local updates on each machine; (ii) our algorithm clips the gradient according to the norm of the local gradient on each machine, instead of the norm of the averaged gradient across machines as in the naive parallel algorithm. These key features make our algorithm amenable to the distributed setting with infrequent communication, and it is nontrivial to establish the desired theoretical guarantees (e.g., linear speedup, reduced communication complexity). The main difficulty in the analysis lies in dealing with the nonconvex objective function, non-Lipschitz continuous gradient, and skipping communication rounds simultaneously. Our main contributions are summarized as follows:
|
| 32 |
+
|
| 33 |
+
- We design a novel communication-efficient distributed stochastic local gradient clipping algorithm, namely CELGC, for solving a nonconvex optimization problem under the relaxed smoothness condition. The algorithm only needs to clip the gradient according to the local gradient's magnitude and globally averages the weights on all machines periodically. To the best of our knowledge, this is the first work proposing communication-efficient distributed stochastic gradient clipping algorithms under the relaxed smoothness condition.
|
| 34 |
+
- Under the relaxed smoothness condition, we prove iteration and communication complexity results of our algorithm for finding an $\epsilon$ -stationary point when each machine has access to the same data distribution. First, compared with [50], we prove that our algorithm enjoys linear speedup, meaning that its iteration complexity is reduced by a factor of $N$ (the number of machines). Second, compared with the naive parallel version of the algorithm of [50], we prove that our algorithm enjoys better communication complexity. A detailed comparison with existing algorithms under the same relaxed smoothness condition is given in Table 1. To achieve this result, we introduce a novel technique for estimating truncated random variables, which is of independent interest and could be applied in related problems involving truncation operations such as gradient clipping.
|
| 35 |
+
- We empirically verify our theoretical results by conducting experiments on different neural network architectures on benchmark datasets. The experimental results demonstrate that our proposed algorithm indeed exhibits speedup in practice.
|
| 36 |
+
|
| 37 |
+
# 2 Related Work
|
| 38 |
+
|
| 39 |
+
Gradient Clipping/Normalization Algorithms In the deep learning literature, the gradient clipping (normalization) technique was initially proposed by [34] to address the exploding gradient problem identified in [33], and it has become a standard technique when training language models [8, 35, 31]. It has been shown that gradient clipping is robust and can mitigate label noise [30]. Recently, gradient normalization techniques [45, 46] were applied to train deep neural networks in the very large batch setting. For example, You et al. [45] designed the LARS algorithm to train a ResNet-50 on ImageNet with batch size $32k$ , which utilizes different learning rates according to the norm of the weights and the norm of the gradient. In the optimization literature, gradient clipping (normalization) was used in the early days of convex optimization [7, 11, 38]. Nesterov [32] and Hazan et al. [14] considered normalized gradient descent for quasi-convex functions in the deterministic and stochastic cases, respectively. Gorbunov et al. [10] designed an accelerated gradient clipping method to solve convex optimization problems with heavy-tailed noise in stochastic gradients. Mai and Johansson [26] established the stability and convergence of stochastic gradient clipping algorithms for convex and weakly convex functions. In nonconvex optimization, Levy [24] showed that normalized gradient descent can escape saddle points. Cutkosky and Mehta [3] showed that adding momentum provably improves normalized SGD in nonconvex optimization. Zhang et al. [51] and Zhang et al. [50] analyzed gradient clipping for nonconvex optimization under the relaxed smoothness condition rather than the traditional $L$ -smoothness condition [9]. However, all of these works only consider the single machine setting or the naive parallel setting, and none of them applies to the distributed setting where only limited communication is allowed.
|
| 40 |
+
|
| 41 |
+
Communication-Efficient Algorithms in Distributed and Federated Learning In large-scale machine learning, people usually train their models using first-order methods on multiple machines, and these machines communicate and aggregate their model parameters periodically. When the function is convex, there is a scheme named one-shot averaging [56, 28, 54, 37], in which every machine runs a stochastic approximation algorithm and averages the model weights across machines only at the very last iteration. The one-shot averaging scheme is communication-efficient and enjoys statistical convergence with one pass over the data [54, 37, 16, 22], but the training error may not converge in practice. McMahan et al. [29] considered the federated learning setting, where the data is decentralized and might be non-i.i.d. across devices and communication is expensive, and designed the very first algorithm for federated learning (a.k.a. FedAvg). Stich [40] considered a concrete case of FedAvg, namely local SGD, which runs SGD independently in parallel on different workers and averages the model parameters only once in a while, and showed that local SGD enjoys linear speedup for strongly-convex objective functions. There are also works analyzing local SGD and its variants for convex [6, 20, 19, 43, 44, 11, 49] and nonconvex smooth functions [55, 47, 48, 17, 41, 25, 2, 13, 19]. Recently, Woodworth et al. [43, 44] analyzed advantages
|
| 42 |
+
|
| 43 |
+
and drawbacks of local SGD compared with minibatch SGD for convex objectives. Woodworth et al. [42] proved hardness results for distributed stochastic convex optimization. Reddi et al. [36] introduced a general framework for federated optimization and designed several federated versions of adaptive optimizers. Zhang et al. [52] considered employing gradient clipping to optimize $L$ -smooth functions and achieve differential privacy. Koloskova et al. [21] developed a unified theory of decentralized SGD with changing topology and local updates for smooth functions. Zhang et al. [53] developed a federated learning framework for nonconvex smooth functions with non-i.i.d. data. Due to the vast amount of literature on federated learning and limited space, we refer readers to [18] and the references therein. However, all of these works assume the objective function is either convex or $L$ -smooth. To the best of our knowledge, our algorithm is the first communication-efficient algorithm that does not rely on these assumptions but still enjoys linear speedup.
|
| 44 |
+
|
| 45 |
+
# 3 Preliminaries, Notations and Problem Setup
|
| 46 |
+
|
| 47 |
+
Preliminaries and Notations Denote by $\| \cdot \|$ the Euclidean norm. We denote by $f: \mathbb{R}^d \to \mathbb{R}$ the overall loss function and by $F: \mathbb{R}^d \to \mathbb{R}$ the loss function on the $i$ -th machine, where $i \in [N] := \{1, \dots, N\}$ . Denote by $\nabla h(\mathbf{x})$ the gradient of $h$ evaluated at the point $\mathbf{x}$ , and by $\nabla h(\mathbf{x}; \xi)$ the stochastic gradient of $h$ computed based on the sample $\xi$ .
|
| 48 |
+
|
| 49 |
+
Definition 1 (L-smoothness). A function $h$ is L-smooth if $\| \nabla h(\mathbf{x}) - \nabla h(\mathbf{y}) \| \leq L \| \mathbf{x} - \mathbf{y} \|$ for all $\mathbf{x}, \mathbf{y} \in \mathbb{R}^d$ .
|
| 50 |
+
|
| 51 |
+
Definition 2 $((L_0,L_1)$ -smoothness). A second order differentiable function $h$ is $(L_0,L_1)$ -smooth if $\| \nabla^2 h(\mathbf{x})\| \leq L_0 + L_1\| \nabla h(\mathbf{x})\|$ for any $\mathbf{x}\in \mathbb{R}^d$ .
|
| 52 |
+
|
| 53 |
+
Definition 3 (ε-stationary point). $\mathbf{x} \in \mathbb{R}^d$ is an $\epsilon$ -stationary point of the function $h$ if $\|\nabla h(\mathbf{x})\| \leq \epsilon$ .
|
| 54 |
+
|
| 55 |
+
Remark: $(L_0, L_1)$ -smoothness is strictly weaker than $L$ -smoothness. First, every $L$ -smooth function is $(L_0, L_1)$ -smooth with $L_0 = L$ and $L_1 = 0$ . However, the reverse is not true. For example, consider the function $h(x) = x^4$ : its gradient is not Lipschitz continuous, so $h$ is not $L$ -smooth, but $|h''(x)| = 12x^2 \leq 12 + 3 \times 4|x|^3 = 12 + 3|h'(x)|$ , so $h(x) = x^4$ is $(12, 3)$ -smooth. Zhang et al. [51] empirically showed that $(L_0, L_1)$ -smoothness holds for the AWD-LSTM [31]. In the nonconvex optimization literature [9, 50], the goal is to find an $\epsilon$ -stationary point, since it is NP-hard to find a global optimal solution for a general nonconvex function.
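As a quick sanity check on this example, one can verify the $(12, 3)$ -smoothness inequality numerically over a grid; the snippet below is a minimal sketch.

```python
import numpy as np

# Check |h''(x)| <= L0 + L1 |h'(x)| for h(x) = x^4 with (L0, L1) = (12, 3).
x = np.linspace(-10.0, 10.0, 100_001)
h1 = 4 * x**3        # h'(x)
h2 = 12 * x**2       # h''(x)
assert np.all(np.abs(h2) <= 12 + 3 * np.abs(h1))
print("(12, 3)-smoothness holds on the sampled grid")
```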
|
| 56 |
+
|
| 57 |
+
Problem Setup In this paper, we consider the following optimization problem using $N$ machines:
|
| 58 |
+
|
| 59 |
+
$$
|
| 60 |
+
\min _ {\mathbf {x} \in \mathbb {R} ^ {d}} f (\mathbf {x}) = \mathbb {E} _ {\xi \sim \mathcal {D}} [ F (\mathbf {x}; \xi) ], \tag {1}
|
| 61 |
+
$$
|
| 62 |
+
|
| 63 |
+
where $\mathcal{D}$ stands for the data distribution which each machine has access to, and $f$ is the population loss function.
|
| 64 |
+
|
| 65 |
+
We make the following assumptions throughout the paper.
|
| 66 |
+
|
| 67 |
+
Assumption 1. (i) $f(\mathbf{x})$ is $(L_0, L_1)$ -smooth, i.e., $\| \nabla^2 f(\mathbf{x}) \| \leq L_0 + L_1 \| \nabla f(\mathbf{x}) \|$ , for $\forall \mathbf{x} \in \mathbb{R}^d$ .
|
| 68 |
+
|
| 69 |
+
(ii) There exists $\Delta > 0$ such that $f(\mathbf{x}_0) - f_* \leq \Delta$ , where $f_*$ is the global optimal value of $f$ .
|
| 70 |
+
(iii) For all $\mathbf{x} \in \mathbb{R}^d$ , $\mathbb{E}_{\xi \sim \mathcal{D}}[\nabla F(\mathbf{x}; \xi)] = \nabla f(\mathbf{x})$ , and $\| \nabla F(\mathbf{x}; \xi) - \nabla f(\mathbf{x}) \| \leq \sigma$ almost surely.
|
| 71 |
+
(iv) The distribution of $\nabla F(\mathbf{x};\xi)$ is symmetric around its mean $\nabla f(\mathbf{x})$ , and the density is monotonically decreasing over the $\ell_2$ distance between the mean and the value of random variable.
|
| 72 |
+
|
| 73 |
+
Remark: Assumption 1(i) means that the loss function satisfies the relaxed-smoothness condition, which holds when training a language model with LSTMs. We consider the homogeneous distributed learning setting, where each machine has access to the same data distribution, as in [44]. Assumptions 1(ii) and (iii) are standard in nonconvex optimization [9, 51]. Note that it is usually assumed that the stochastic gradient is unbiased with bounded variance [9], but in the relaxed smoothness setting we follow [51] and assume an unbiased stochastic gradient with almost surely bounded deviation $\sigma$ . Assumption 1(iv) assumes the noise is unimodal and symmetric around its mean, which we empirically verify in Appendix E. Examples satisfying (iii) and (iv) include the truncated Gaussian distribution, the truncated Student's t-distribution, etc.
|
| 74 |
+
|
| 75 |
+
# Algorithm 1 Communication Efficient Local Gradient Clipping (CELGC)
|
| 76 |
+
|
| 77 |
+
1: for $t = 0, \dots, T$ do
|
| 78 |
+
2: Each node $i$ samples its stochastic gradient $\nabla F(\mathbf{x}_t^i;\xi_t^i)$ , where $\xi_t^i\sim \mathcal{D}$
|
| 79 |
+
3: Each node $i$ updates its local solution in parallel:
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
\mathbf {x} _ {t + 1} ^ {i} = \mathbf {x} _ {t} ^ {i} - \min \left(\eta , \frac {\gamma}{\| \nabla F (\mathbf {x} _ {t} ^ {i} ; \xi_ {t} ^ {i}) \|}\right) \nabla F (\mathbf {x} _ {t} ^ {i}; \xi_ {t} ^ {i}) \tag {2}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
4: if $t$ is a multiple of $I$ then
|
| 86 |
+
5: Each worker resets the local solution as the averaged solution across nodes:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\mathbf {x} _ {t} ^ {i} = \widehat {\mathbf {x}} := \frac {1}{N} \sum_ {j = 1} ^ {N} \mathbf {x} _ {t} ^ {j} \quad \forall i \in \{1, \dots , N \} \tag {3}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
6: end if
|
| 93 |
+
7: end for
|
| 94 |
+
|
| 95 |
+
# 4 Algorithm and Theoretical Analysis
|
| 96 |
+
|
| 97 |
+
# 4.1 Main Difficulty and the Algorithm Design
|
| 98 |
+
|
| 99 |
+
We briefly present the main difficulty in extending the single machine setting [50] to the distributed setting. In [50], the contribution to decreasing the objective value is split into two cases: clipping large gradients and keeping small gradients. If communication were allowed at every iteration, we could aggregate the gradients on each machine and determine whether the averaged gradient should be clipped or not. However, in our setting, communicating with other machines at every iteration is not allowed. This leads to the following difficulties: (i) the averaged gradient may not be available to the algorithm when communication is limited, so it is hard to determine whether the clipping operation should be performed; (ii) the model weights on the machines may differ when communication does not happen at the current iteration; (iii) the loss function is not $L$ -smooth, so the usual local SGD analysis for $L$ -smooth functions does not apply.
|
| 100 |
+
|
| 101 |
+
To address this issue, we design a new algorithm, namely Communication-Efficient Local Gradient Clipping (CELGC), which is presented in Algorithm 1. The algorithm calculates a stochastic gradient and then performs multiple local gradient clipping steps on each machine in parallel, and aggregates model parameters on all machines after every $I$ steps of local updates. We aim to establish iteration and communication complexity for Algorithm 1 for finding an $\epsilon$ -stationary point when $I > 1$ .
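The following Python sketch illustrates the structure of Algorithm 1 under update rules (2) and (3). The function name `celgc` and the stochastic-gradient oracle interface are illustrative assumptions, not the authors' implementation.

```python
import numpy as np

def celgc(grad_oracle, x0, eta, gamma, I, T, N, rng):
    """Minimal sketch of CELGC (Algorithm 1): each of N workers runs clipped
    stochastic gradient steps locally, and the weights are averaged every I
    iterations; grad_oracle(x, i, rng) returns a stochastic gradient at x."""
    x = [x0.copy() for _ in range(N)]                 # local iterates x_t^i
    for t in range(T):
        for i in range(N):
            g = grad_oracle(x[i], i, rng)
            step = min(eta, gamma / (np.linalg.norm(g) + 1e-12))  # clipping rule (2)
            x[i] = x[i] - step * g
        if (t + 1) % I == 0:                          # periodic weight averaging, cf. (3)
            avg = sum(x) / N
            x = [avg.copy() for _ in range(N)]
    return sum(x) / N
```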
|
| 102 |
+
|
| 103 |
+
# 4.2 A Lemma for Truncated Random Variables
|
| 104 |
+
|
| 105 |
+
As indicated in the previous subsection, Algorithm 1 clips the gradient on each local machine. This feature greatly complicates the analysis: it is difficult to obtain an unbiased estimate of the stochastic gradient on the event that its magnitude is small enough that clipping does not happen. The reason is the dependency between random variables (i.e., the stochastic gradient and the indicator of clipping). To get around this difficulty, we introduce the following lemma for estimating truncated random variables.
|
| 106 |
+
|
| 107 |
+
Lemma 1. Denote by $\mathbf{g} \in \mathbb{R}^d$ a random vector. Suppose the distribution of $\mathbf{g}$ is symmetric around its mean, and the density is monotonically decreasing over the $\ell_2$ distance between the mean and the value of random variable, then there exists $\Lambda = \mathrm{diag}(c_1, \ldots, c_d)$ and $0 < c_i \leq 1$ with $i = 1, \ldots, d$ such that
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathbb {E} \left[ \mathbf {g} \mathbb {I} (\| \mathbf {g} \| \leq \alpha) \right] = P r (\| \mathbf {g} \| \leq \alpha) \Lambda \mathbb {E} [ \mathbf {g} ], \tag {4}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\alpha > 0$ is a constant, $\mathbb{I}(\cdot)$ is the indicator function.
|
| 114 |
+
|
| 115 |
+
The proof of Lemma 1 is included in Appendix B. This lemma provides an unbiased estimate of a truncated random variable. In the remainder of this paper, we regard $\mathbf{g}$ in the lemma as the stochastic gradient and $\alpha$ as the clipping threshold. In addition, we define $c_{\min} = \min(c_1, \ldots, c_d)$ and $c_{\max} = \max(c_1, \ldots, c_d)$ ; we have $0 < c_{\min} \leq c_{\max} \leq 1$ .
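To build intuition for identity (4), one can check it by Monte Carlo simulation for an isotropic Gaussian gradient-noise model, which satisfies the symmetry and monotone-density conditions of Lemma 1. The sketch below, with our own choices of mean, dimension, and threshold, estimates the diagonal entries of $\Lambda$ empirically.

```python
import numpy as np

rng = np.random.default_rng(0)
d, n, alpha = 3, 1_000_000, 2.0
mu = np.array([0.8, -0.5, 0.3])              # E[g], the true gradient
g = mu + rng.normal(size=(n, d))             # symmetric, unimodal noise around mu

keep = np.linalg.norm(g, axis=1) <= alpha    # indicator that no clipping occurs
lhs = (g * keep[:, None]).mean(axis=0)       # estimate of E[g * 1(||g|| <= alpha)]
p = keep.mean()                              # estimate of Pr(||g|| <= alpha)
c = lhs / (p * mu)                           # estimated diagonal of Lambda
print("Pr(no clip) =", p, " c =", c)         # each c_i should lie in (0, 1]
```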
|
| 116 |
+
|
| 117 |
+
# 4.3 Main Results
|
| 118 |
+
|
| 119 |
+
Theorem 1. Suppose Assumption 1 holds and $\sigma \geq 1$ . Let $\epsilon \leq \min \left(\frac{AL_0}{BL_1}, 0.1\right)$ be a small enough constant and $N \leq \min \left(\frac{1}{\epsilon}, \frac{14AL_0}{5BL_1\epsilon}\right)$ . In Algorithm 1, choose $I \leq \sqrt{\frac{1}{c_{\min}}\frac{\sigma}{N\epsilon}}$ , $\gamma \leq c_{\min}\frac{N\epsilon}{28\sigma}\min \left\{\frac{\epsilon}{AL_0},\frac{1}{BL_1}\right\}$ , and the fixed ratio $\frac{\gamma}{\eta} = 5\sigma$ , where $A \geq 1$ and $B \geq 1$ are constants specified in the proof, and run Algorithm 1 for $T = O\left(\frac{\Delta L_0\sigma^2}{N\epsilon^4}\right)$ iterations. Define $\bar{\mathbf{x}}_t = \frac{1}{N}\sum_{i=1}^{N}\mathbf{x}_t^i$ . Then we have $\frac{1}{T}\sum_{t=1}^{T}\mathbb{E}\|\nabla f(\bar{\mathbf{x}}_t)\| \leq 9\epsilon$ .
|
| 120 |
+
|
| 121 |
+
Remark: Theorem 1 has the following implications. When the number of machines is not too large (i.e., $N \leq O(1 / \epsilon)$ ) and the number of skipped communications is not too large (i.e., $I \leq O(\sigma / \epsilon N)$ ), then with a proper setting of the learning rate we have the following observations. First, our algorithm enjoys linear speedup, since the number of iterations needed to find an $\epsilon$ -stationary point is divided by the number of machines $N$ compared with the single machine algorithm in [50]. Second, our algorithm is communication-efficient, since the communication complexity (i.e., the number of rounds) is $T / I = O\left(\Delta L_0 \sigma \epsilon^{-3}\right)$ , which provably improves upon the naive parallel gradient clipping algorithm of [50] with $O(\Delta L_0 \sigma^2 / (N \epsilon^4))$ communication complexity when $N \leq O(1 / \epsilon)$ .
|
| 122 |
+
|
| 123 |
+
Another important fact is that both iteration complexity and communication complexity only depend on $L_{0}$ while being independent of $L_{1}$ and the gradient upper bound $M$ . This indicates that our algorithm does not suffer from slow convergence even if these quantities are large. This is in line with [50] as well. In addition, local gradient clipping is a good mechanism to alleviate the bad effects brought by a rapidly changing loss landscape (e.g., some language models such as LSTM).
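For concreteness, the hyperparameter choices in Theorem 1 can be computed directly from the problem constants; the sketch below does this under the assumption that the constant $c$ (which determines $A$ and $B$ via the expressions given with Lemma 3) has been fixed, and the numeric values passed in the example call are arbitrary.

```python
import math

def theorem1_hyperparams(eps, N, sigma, L0, L1, c_min, c=1.0):
    """Hyperparameters suggested by Theorem 1 (upper bounds for I and gamma);
    c is the constant from the condition 2 * gamma * I <= c / L1."""
    A = 1 + math.exp(c) - (math.exp(c) - 1) / c
    B = (math.exp(c) - 1) / c
    I_max = math.sqrt(sigma / (c_min * N * eps))
    gamma = c_min * N * eps / (28 * sigma) * min(eps / (A * L0), 1 / (B * L1))
    eta = gamma / (5 * sigma)          # fixed ratio gamma / eta = 5 * sigma
    return I_max, gamma, eta

print(theorem1_hyperparams(eps=0.05, N=8, sigma=1.0, L0=1.0, L1=1.0, c_min=0.5))
```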
|
| 124 |
+
|
| 125 |
+
# 4.4 Sketch of the Proof of Theorem 1
|
| 126 |
+
|
| 127 |
+
In this section, we present a sketch of the proof of Theorem 1; the detailed proof can be found in Appendix C. The key idea is to establish the descent property of the sequence $\{f(\bar{\mathbf{x}}_t)\}_{t=0}^T$ in the distributed setting under the relaxed smoothness condition, where $\bar{\mathbf{x}}_t = \frac{1}{N} \sum_{i=1}^{N} \mathbf{x}_t^i$ is the averaged weight across all machines at the $t$ -th iteration. The main challenge is that the descent property of an $(L_0, L_1)$ -smooth function in the distributed setting does not hold automatically, in sharp contrast to the usual local SGD proof for $L$ -smooth functions. To address this challenge, we carefully study whether the algorithm is able to decrease the objective function in different situations. Our main technical innovations in the proof are the following.
|
| 128 |
+
|
| 129 |
+
First, we monitor the algorithm's progress in decreasing the objective value according to some novel measures: the magnitude of the gradient evaluated at the averaged weight, and the magnitudes of the local gradients evaluated at the individual weights on every machine. To this end, we introduce Lemma 3, whose goal is to carefully inspect how much progress the algorithm makes according to the magnitude of the local gradient computed on each machine; the local gradient's magnitude indicates whether the clipping operation happens or not. For each fixed iteration $t$ , we define $J(t) = \{i \in [N] : \| \nabla F(\mathbf{x}_t^i, \xi_t^i) \| \geq \gamma / \eta\}$ and $\bar{J}(t) = [N] \setminus J(t)$ . Briefly speaking, $J(t)$ contains all machines that perform the clipping operation at iteration $t$ , and $\bar{J}(t)$ is the set of machines that do not. In Lemma 3, we perform a one-step analysis and consider all machines with their different clipping behaviors at iteration $t$ . The proof of Lemma 3 crucially relies on the lemma for estimating truncated random variables (i.e., Lemma 1) to compute the expectation of non-clipped gradients.
|
| 130 |
+
|
| 131 |
+
Second, Zhang et al. [50] inspect their algorithm's progress by considering the magnitude of the gradient at different iterations, so they treat every iteration differently. However, this approach does not work in the distributed setting with infrequent communication since one cannot get access to the averaged gradient across machines at every iteration. Instead, we treat every iteration of the algorithm as the same but consider the progress made by each machine.
|
| 132 |
+
|
| 133 |
+
Third, by properly choosing hyperparameters $(\eta, \gamma, I)$ and using an amortized analysis, we prove that our algorithm can decrease the objective value by a sufficient amount, and the sufficient decrease is mainly due to the case where the gradient is not too large (i.e., clipping operations do not happen). This important insight allows us to better characterize the training dynamics.
|
| 134 |
+
|
| 135 |
+
Now we present how to proceed with the proof in detail.
|
| 136 |
+
|
| 137 |
+
Lemma 2 characterizes the $\ell_2$ error between the averaged weight and the individual weights at the $t$ -th iteration.
|
| 138 |
+
|
| 139 |
+
Lemma 2. Under Assumption 1, for any $i$ and $t$ , Algorithm 1 ensures that $\| \bar{\mathbf{x}}_t - \mathbf{x}_t^i\| \leq 2\gamma I$ holds almost surely.
|
| 140 |
+
|
| 141 |
+
Lemma 5 and Lemma 6 (in Appendix A) are the properties of $(L_0, L_1)$ -smooth functions that we use. To ensure they apply, we need $2\gamma I \leq c / L_1$ for some $c > 0$ ; this is verified in the proof of Theorem 1 (in Appendix C).
|
| 142 |
+
|
| 143 |
+
Let $J(t)$ be the index set of $i$ such that $\| \nabla F(\mathbf{x}_t^i, \xi_t^i) \| \geq \frac{\gamma}{\eta}$ at a fixed iteration $t$ , i.e., $J(t) = \{i \in [N] \mid \| \nabla F(\mathbf{x}_t^i; \xi_t^i) \| \geq \gamma / \eta\}$ . Lemma 3 characterizes how much progress we can make in one iteration of Algorithm 1, decomposed into contributions from every machine (note that $J(t) \cup \bar{J}(t) = \{1, \dots, N\}$ for every $t$ ).
|
| 144 |
+
|
| 145 |
+
Lemma 3. Let $J(t)$ be the set defined as above. If $2\gamma I \leq c / L_1$ for some $c > 0$ , $AL_0\eta \leq 1 / 2$ , and $\gamma /\eta = 5\sigma$ , then we have
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
\begin{aligned}
&\mathbb{E}\left[f(\bar{\mathbf{x}}_{t+1}) - f(\bar{\mathbf{x}}_{t})\right]\\
&\leq \frac{1}{N}\,\mathbb{E}\sum_{i\in J(t)}\left[-\frac{2\gamma}{5}\|\nabla f(\bar{\mathbf{x}}_{t})\| - \frac{3\gamma^{2}}{5\eta} + \frac{50AL_{0}\eta^{2}\sigma^{2}}{N} + \frac{7\gamma}{5}\|\nabla F(\mathbf{x}_{t}^{i};\xi_{t}^{i}) - \nabla f(\bar{\mathbf{x}}_{t})\| + AL_{0}\gamma^{2} + \frac{BL_{1}\gamma^{2}\|\nabla f(\bar{\mathbf{x}}_{t})\|}{2}\right]\\
&\quad + \frac{1}{N}\,\mathbb{E}\sum_{i\in \bar{J}(t)}\left[-\frac{\eta c_{\min}}{2}\|\nabla f(\bar{\mathbf{x}}_{t})\|^{2} + 4\gamma^{2}I^{2}A^{2}L_{0}^{2}\eta + 4\gamma^{2}I^{2}B^{2}L_{1}^{2}\eta\|\nabla f(\bar{\mathbf{x}}_{t})\|^{2} + \frac{50AL_{0}\eta^{2}\sigma^{2}}{N} + \frac{BL_{1}\gamma^{2}\|\nabla f(\bar{\mathbf{x}}_{t})\|}{2}\right],
\end{aligned}
|
| 149 |
+
$$
|
| 150 |
+
|
| 151 |
+
where $A = 1 + e^{c} - \frac{e^{c} - 1}{c}$ and $B = \frac{e^c - 1}{c}$ .
|
| 152 |
+
|
| 153 |
+
Lemma 4 gives an upper bound on the $\ell_2$ error between the local gradient evaluated at the local weight and the gradient evaluated at the averaged weight.
|
| 154 |
+
|
| 155 |
+
Lemma 4. Suppose Assumption 1 holds. When $2\gamma I \leq c / L_1$ for some $c > 0$ , the following inequality holds for every $i$ almost surely, with $A = 1 + e^c - \frac{e^c - 1}{c}$ and $B = \frac{e^c - 1}{c}$ :
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
\left\| \nabla F \left(\mathbf {x} _ {t} ^ {i}; \xi_ {t} ^ {i}\right) - \nabla f (\bar {\mathbf {x}} _ {t}) \right\| \leq \sigma + 2 \gamma I \left(A L _ {0} + B L _ {1} \| \nabla f (\bar {\mathbf {x}} _ {t}) \|\right).
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
Putting it all together Suppose our algorithm runs for $T$ iterations. Summing both sides of Lemma 3 over $t = 0, \dots, T - 1$ , we obtain an upper bound on $\sum_{t=0}^{T-1} \mathbb{E}[f(\bar{\mathbf{x}}_{t+1}) - f(\bar{\mathbf{x}}_t)] = \mathbb{E}[f(\bar{\mathbf{x}}_T) - f(\bar{\mathbf{x}}_0)]$ . Since $\mathbb{E}[f(\bar{\mathbf{x}}_T) - f(\bar{\mathbf{x}}_0)] \geq -\Delta$ by Assumption 1, we obtain an upper bound on the gradient norm. For details, please refer to the proof of Theorem 1 in Appendix C.
|
| 162 |
+
|
| 163 |
+
# 5 Experiments
|
| 164 |
+
|
| 165 |
+
We conduct extensive experiments to validate the merits of our algorithm on various tasks (e.g., image classification, language modeling). In the main text below, we consider the homogeneous data setting, where each machine has the same data distribution and every machine participates in the communication at each round. In Appendix D, we also test our algorithm in the general federated learning setting (e.g., partial participation of machines) and provide an ablation study on small/large batch sizes, among other experiments.
|
| 166 |
+
|
| 167 |
+
We conduct each experiment on two nodes with 4 Nvidia V100 GPUs on each node. In our experiments, one "machine" corresponds to one GPU, and we use the words "GPU" and "machine" interchangeably in this section. We compare our algorithm with the baseline across four deep learning benchmarks: CIFAR-10 image classification with ResNet, Penn Treebank language modeling with LSTM, Wikitext-2 language modeling with LSTM, and ImageNet classification with ResNet. All algorithms and the training framework are implemented in PyTorch 1.4. Due to limited computational resources, for our algorithms we choose the same hyperparameters, such as the clipping thresholds, as the best-tuned baselines unless otherwise specified.
|
| 168 |
+
|
| 169 |
+
We compare our algorithm with different $I$ against the baseline, which is the naive parallel version of the algorithm in [50]. We re-emphasize that the difference is that the baseline algorithm needs to average the model weights and local gradients at every iteration, while ours only requires averaging the model weights after every $I$ iterations. We find that our algorithm with a range of values of $I$ can
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
(a) Over epoch
|
| 173 |
+
|
| 174 |
+

|
| 175 |
+
(b) Over wall clock time
|
| 176 |
+
|
| 177 |
+

|
| 178 |
+
Figure 1: Algorithm $\boxed{1}$ with different $I$ : Training loss and test accuracy v.s. (Left) epoch and (right) wall clock time on training a 56 layer Resnet to do image classification on CIFAR10.
|
| 179 |
+
(a) Over epoch
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
(b) Over Wall clock time
|
| 183 |
+
Figure 2: Algorithm $\boxed{1}$ with different $I$ : Training loss and validation perplexity v.s. (Left) epoch and (right) wall clock time on training an AWD-LSTM to do language modeling on Penn Treebank.
|
| 184 |
+
|
| 185 |
+
match the results of the baseline in terms of epochs on different models and data. This immediately suggests that our algorithm will gain substantial speedup in terms of the wall clock time, which is also supported by our experiments.
|
| 186 |
+
|
| 187 |
+
# 5.1 Effects of Skipping Communication
|
| 188 |
+
|
| 189 |
+
We focus on one feature of our algorithm: skipping communication rounds. Theorem 1 states that our algorithm enjoys reduced communication complexity, since every node only communicates with other nodes periodically, with node synchronization interval length $I$ . To study how skipping communication affects the convergence of Algorithm 1, we run it with $I \in \{2, 4, 8, 16, 32\}$ .
|
| 190 |
+
|
| 191 |
+
CIFAR-10 classification with ResNet-56. We train the standard 56-layer ResNet architecture on CIFAR-10. We use SGD with clipping as the baseline algorithm with a stagewise decaying learning rate schedule, following the widely adopted practice for training the ResNet architecture. Specifically, we use the initial learning rate $\eta = 0.3$ and the clipping threshold $\gamma = 1.0$ , and decrease the learning rate by a factor of 10 at epochs 80 and 120. The local batch size at each GPU is 64. These parameter settings follow those of [47].
|
| 192 |
+
|
| 193 |
+
The results are illustrated in Figure 1. Figure 1a shows the convergence of training loss and test accuracy v.s. the number of epochs jointly accessed by all GPUs; this means that if the x-axis value is 8, then each GPU runs 1 epoch of training data. The same convention applies to all other figures for multiple-GPU training in this paper. Figure 1b verifies our algorithm's advantage of skipping communication by plotting the convergence of training loss and test accuracy v.s. the wall clock time. Overall, we can clearly see that our algorithm matches the baseline epoch-wise but greatly speeds up training wall-clock-wise.
|
| 194 |
+
|
| 195 |
+
Language modeling with LSTM on Penn Treebank. We adopt the 3-layer AWD-LSTM [31] to do language modeling on the Penn Treebank (PTB) dataset [27] (word level). We use SGD with clipping as the baseline algorithm with the clipping threshold $\gamma = 7.5$ . The local batch size at each GPU is 3. These parameter settings follow those of [50]. We fine-tuned the initial learning rate $\eta$ for all algorithms (including the baseline) by choosing the one giving the smallest final training loss in the range $\{0.1, 0.5, 1, 5, 10, 20, 30, 40, 50, 100\}$ .
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
(a) Over epoch
|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
(b) Over Wall clock time
|
| 202 |
+
Figure 3: Algorithm 1 with different $I$: Training loss and validation perplexity vs. (left) epoch and (right) wall clock time on training an AWD-LSTM for language modeling on Wikitext-2.
|
| 203 |
+
|
| 204 |
+
We report the results in Figure 2. Our algorithm matches the baseline in both training loss and validation perplexity epoch-wise while gaining a substantial speedup (4x faster for $I = 4$) wall-clock-wise.
|
| 205 |
+
|
| 206 |
+
Language modeling with LSTM on Wikitext-2. We adopt the 3-layer AWD-LSTM [31] for language modeling on the Wikitext-2 dataset [27] (word level). We use SGD with clipping as the baseline algorithm with the clipping threshold $\gamma = 7.5$. The local batch size at each GPU is 10. These parameter settings follow those of [31]. We fine-tuned the initial learning rate $\eta$ for all algorithms (including the baseline) by choosing the one giving the smallest final training loss in the range $\{0.1, 0.5, 1, 5, 10, 20, 30, 40, 50, 100\}$.
|
| 207 |
+
|
| 208 |
+
We report the results in Figure 3. We match the baseline in both training loss and validation perplexity epoch-wise, and we again obtain a large speedup (4x faster for $I = 4$) wall-clock-wise. This, together with the above two experiments, clearly shows our algorithm's effectiveness in speeding up training in distributed settings. Another observation is that Algorithm 1 tolerates relatively large $I$ without hurting the convergence behavior.
|
| 209 |
+
|
| 210 |
+
ImageNet classification with ResNet-50. We compare our algorithm against several baselines in training a ResNet-50 on ImageNet. We compare with two strong baselines: one is Naive Parallel SGDClip, and the other is a well-accepted baseline for ImageNet by [12]. We run the experiments on 8 GPUs and follow the settings of [12] to set up the hyperparameters for the baselines. Specifically, for every method, the initial learning rate is 0.0125, and we use a 5-epoch warmup, batch size 32, momentum parameter 0.9, and weight decay $5 \times 10^{-4}$. The learning rate multiplying factor is 1 for epochs $5 \sim 30$, 0.1 for epochs $30 \sim 60$, 0.01 for epochs $60 \sim 80$, and 0.001 for epochs $80 \sim 90$. The clipping threshold for both Naive Parallel SGDClip and our method CELGC is set to 1. We consider our algorithm with $I = 4$, i.e., CELGC performs weight averaging after every 4 steps of local stochastic gradient descent with gradient clipping on each GPU.
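The stated stagewise schedule with warmup can be written as a small helper; the function below is a hypothetical re-implementation of the multipliers listed above (linear warmup over the first 5 epochs, then factors 1, 0.1, 0.01, 0.001), not the authors' code.

```python
def imagenet_lr(epoch, base_lr=0.0125, warmup_epochs=5):
    """Stagewise learning rate with linear warmup, following the schedule stated above."""
    if epoch < warmup_epochs:
        # Linear warmup from 0 to base_lr over the first 5 epochs.
        return base_lr * (epoch + 1) / warmup_epochs
    if epoch < 30:
        return base_lr           # multiplier 1 for epochs 5-30
    if epoch < 60:
        return base_lr * 0.1     # epochs 30-60
    if epoch < 80:
        return base_lr * 0.01    # epochs 60-80
    return base_lr * 0.001       # epochs 80-90

print([round(imagenet_lr(e), 6) for e in (0, 4, 10, 35, 65, 85)])
```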
|
| 211 |
+
|
| 212 |
+
The results are shown in Figure 6. We report the performance of these methods from several different perspectives (training/validation accuracy versus epoch, and training/validation accuracy versus wall clock time). We can see that the training accuracy of our algorithm CELGC with $I = 4$ matches both baselines in terms of epochs (Figure 6a), while being much better in terms of running time (Figure 6b).
|
| 213 |
+
|
| 214 |
+
# 5.2 Verifying Parallel Speedup
|
| 215 |
+
|
| 216 |
+
Figure 4 shows the training loss and test accuracy vs. the number of iterations. In the distributed setting, one iteration means running one step of Algorithm 1 on all machines, while in the single-machine setting, one iteration means running one step of SGD with clipping. In our experiment, we use minibatch size 64 on every GPU in the distributed setting to run Algorithm 1, and we also use minibatch size 64 on the single GPU to run SGD with clipping. In the left two panels of Figure 4, we can clearly see that even with $I > 1$, our algorithm still enjoys parallel speedup, since it requires fewer iterations to converge to the same targets (e.g., training loss, test accuracy). This observation is consistent with our iteration complexity results in Theorem 1.
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Figure 4: Performance v.s. # of iterations each GPU runs on training ResNet-56 on CIFAR-10 showing the parallel speedup.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
(a) CIFAR-10
|
| 223 |
+
|
| 224 |
+

|
| 225 |
+
(b) Penn Treebank
|
| 226 |
+
Figure 5: Proportion of iterations in each epoch in which clipping is triggered vs. epochs, showing that clipping is very frequent.
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
(a) Performance over epoch
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
(b) Performance over wall clock time
|
| 233 |
+
Figure 6: Training loss and test accuracy vs. epoch (left) and wall clock time (right) on training a ResNet-50 for image classification on ImageNet.
|
| 234 |
+
|
| 235 |
+
# 5.3 Clipping Operation Happens Frequently
|
| 236 |
+
|
| 237 |
+
Figure 5 reports the proportion of iterations in each epoch in which clipping is triggered. We observe that for our algorithm, clipping happens more frequently than for the baseline, especially for NLP tasks. We conjecture that this is because we only use local gradients in each GPU for clipping, without averaging them across all machines as the baseline does. This leads to more stochasticity in the gradient norm of our algorithm than in the baseline, and thus causes clipping to trigger more often. This observation highlights the importance of studying clipping algorithms in the distributed setting. Another interesting observation is that clipping happens much more frequently when training language models than image classification models. Hence this algorithm is presumably more effective for training deep models in NLP tasks.
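What Figure 5 reports can be computed by logging the local gradient norm at every iteration; a minimal sketch (function and variable names are illustrative, not taken from the released code) is shown below.

```python
def clipping_frequency(grad_norms_per_epoch, gamma):
    """For each epoch, return the fraction of iterations whose gradient norm exceeds gamma,
    i.e. the iterations in which clipping is triggered."""
    freqs = []
    for norms in grad_norms_per_epoch:         # norms: gradient norms recorded in one epoch
        triggered = sum(1 for n in norms if n > gamma)
        freqs.append(triggered / max(len(norms), 1))
    return freqs

# Example: two epochs of recorded local gradient norms, threshold gamma = 1.0
print(clipping_frequency([[0.4, 1.3, 2.0], [0.2, 0.8, 0.9]], gamma=1.0))  # [0.666..., 0.0]
```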
|
| 238 |
+
|
| 239 |
+
# 6 Conclusion
|
| 240 |
+
|
| 241 |
+
In this paper, we design a communication-efficient distributed stochastic local gradient clipping algorithm to train deep neural networks. By exploring the relaxed smoothness condition which was shown to be satisfied for certain neural networks, we theoretically prove both the linear speedup property and the improved communication complexity when the data distribution across machines is homogeneous. Our empirical studies show that our algorithm indeed enjoys parallel speedup and greatly improves the runtime performance in various federated learning scenarios (e.g., partial client participation). One limitation of our work is that our convergence analysis is only applicable for homogeneous data, and it would be interesting to analyze the settings of heterogeneous data theoretically in the future.
|
| 242 |
+
|
| 243 |
+
# Acknowledgements
|
| 244 |
+
|
| 245 |
+
We would like to thank the anonymous reviewers for their helpful comments. Mingrui Liu is supported by a grant at George Mason University. Computations were run on ARGO, a research computing cluster provided by the Office of Research Computing at George Mason University (URL: https://orc.gmu.edu). The majority of the work of Zhenxun Zhuang was done when he was a Ph.D. student at Boston University.
|
| 246 |
+
|
| 247 |
+
# References
|
| 248 |
+
|
| 249 |
+
[1] Ya I Alber, Alfredo N. Iusem, and Mikhail V. Solodov. On the projected subgradient method for nonsmooth convex optimization in a Hilbert space. Mathematical Programming, 81(1):23-35, 1998.
|
| 250 |
+
[2] Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. Qsparse-local-sgd: Distributed sgd with quantization, sparsification and local computations. In Advances in Neural Information Processing Systems, pages 14668-14679, 2019.
|
| 251 |
+
[3] Ashok Cutkosky and Harsh Mehta. Momentum improves normalized sgd. In International Conference on Machine Learning, pages 2260-2268. PMLR, 2020.
|
| 252 |
+
[4] Jeffrey Dean, Greg Corrado, Rajat Monga, Kai Chen, Matthieu Devin, Mark Mao, Marc'Aurelio Ranzato, Andrew Senior, Paul Tucker, Ke Yang, et al. Large scale distributed deep networks. In Advances in Neural Information Processing Systems, pages 1223-1231, 2012.
|
| 253 |
+
[5] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
|
| 254 |
+
[6] Aymeric Dieuleveut and Kumar Kshitij Patel. Communication trade-offs for local-sgd with large step size. Advances in Neural Information Processing Systems, 32:13601-13612, 2019.
|
| 255 |
+
[7] Yuri Ermoliev. Stochastic quasigradient methods. In Numerical Techniques for Stochastic Optimization, Springer Series in Computational Mathematics, (10):141-185, 1988.
|
| 256 |
+
[8] Jonas Gehring, Michael Auli, David Grangier, Denis Yarats, and Yann N Dauphin. Convolutional sequence to sequence learning. In International Conference on Machine Learning, pages 1243-1252. PMLR, 2017.
|
| 257 |
+
[9] Saeed Ghadimi and Guanghui Lan. Stochastic first-and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013.
|
| 258 |
+
[10] Eduard Gorbunov, Marina Danilova, and Alexander Gasnikov. Stochastic optimization with heavy-tailed noise via accelerated gradient clipping. arXiv preprint arXiv:2005.10785, 2020.
|
| 259 |
+
[11] Eduard Gorbunov, Filip Hanzely, and Peter Richtárik. Local sgd: Unified theory and new efficient methods. In International Conference on Artificial Intelligence and Statistics, pages 3556-3564. PMLR, 2021.
|
| 260 |
+
[12] Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch SGD: Training ImageNet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.
|
| 261 |
+
[13] Farzin Haddadpour, Mohammad Mahdi Kamani, Mehrdad Mahdavi, and Viveck Cadambe. Local sgd with periodic averaging: Tighter analysis and adaptive synchronization. In Advances in Neural Information Processing Systems, pages 11080-11092, 2019.
|
| 262 |
+
[14] Elad Hazan, Kfir Y Levy, and Shai Shalev-Shwartz. Beyond convexity: Stochastic quasi-convex optimization. arXiv preprint arXiv:1507.02030, 2015.
|
| 263 |
+
[15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.
|
| 264 |
+
[16] Prateek Jain, Sham M Kakade, Rahul Kidambi, Praneeth Netrapalli, and Aaron Sidford. Parallelizing stochastic gradient descent for least squares regression: Mini-batching, averaging, and model misspecification. Journal of Machine Learning Research, 18:223-1, 2017.
|
| 265 |
+
[17] Peng Jiang and Gagan Agrawal. A linear speedup analysis of distributed deep learning with sparse and quantized communication. In Advances in Neural Information Processing Systems, pages 2525-2536, 2018.
|
| 266 |
+
|
| 267 |
+
[18] Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Kallista Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. arXiv preprint arXiv:1912.04977, 2019.
|
| 268 |
+
[19] Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank Reddi, Sebastian Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for federated learning. In International Conference on Machine Learning, pages 5132-5143. PMLR, 2020.
|
| 269 |
+
[20] Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local sgd on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pages 4519-4529. PMLR, 2020.
|
| 270 |
+
[21] Anastasia Koloskova, Nicolas Loizou, Sadra Boreiri, Martin Jaggi, and Sebastian Stich. A unified theory of decentralized sgd with changing topology and local updates. In International Conference on Machine Learning, pages 5381-5393. PMLR, 2020.
|
| 271 |
+
[22] Anastasia Koloskova, Sebastian U. Stich, and Martin Jaggi. Decentralized stochastic optimization and gossip algorithms with compressed communication. In Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, pages 3478-3487, 2019.
|
| 272 |
+
[23] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pages 1097-1105, 2012.
|
| 273 |
+
[24] Kfir Y Levy. The power of normalization: Faster evasion of saddle points. arXiv preprint arXiv:1611.04831, 2016.
|
| 274 |
+
[25] Tao Lin, Sebastian U Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use local sgd. arXiv preprint arXiv:1808.07217, 2018.
|
| 275 |
+
[26] Vien V Mai and Mikael Johansson. Stability and convergence of stochastic gradient clipping: Beyond lipschitz continuity and smoothness. arXiv preprint arXiv:2102.06489, 2021.
|
| 276 |
+
[27] Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. Building a large annotated corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330, June 1993.
|
| 277 |
+
[28] Ryan McDonald, Keith Hall, and Gideon Mann. Distributed training strategies for the structured perceptron. In Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics, pages 456-464. Association for Computational Linguistics, 2010.
|
| 278 |
+
[29] H Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, et al. Communication-efficient learning of deep networks from decentralized data. AISTATS, 2017.
|
| 279 |
+
[30] Aditya Krishna Menon, Ankit Singh Rawat, Sashank J Reddi, and Sanjiv Kumar. Can gradient clipping mitigate label noise? In International Conference on Learning Representations, 2019.
|
| 280 |
+
[31] Stephen Merity, Nitish Shirish Keskar, and Richard Socher. Regularizing and optimizing LSTM language models. In International Conference on Learning Representations, 2018.
|
| 281 |
+
[32] Yurii E Nesterov. Minimization methods for nonsmooth convex and quasiconvex functions. Matekon, 29:519-531, 1984.
|
| 282 |
+
[33] Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. Understanding the exploding gradient problem. corr abs/1211.5063 (2012). arXiv preprint arXiv:1211.5063, 2012.
|
| 283 |
+
[34] Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. On the difficulty of training recurrent neural networks. In International conference on machine learning, pages 1310-1318. PMLR, 2013.
|
| 284 |
+
[35] Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. arXiv preprint arXiv:1802.05365, 2018.
|
| 285 |
+
|
| 286 |
+
[36] Sashank Reddi, Zachary Charles, Manzil Zaheer, Zachary Garrett, Keith Rush, Jakub Konecny, Sanjiv Kumar, and H Brendan McMahan. Adaptive federated optimization. ICLR, 2021.
|
| 287 |
+
[37] Ohad Shamir and Nathan Srebro. Distributed stochastic optimization and learning. In 2014 52nd Annual Allerton Conference on Communication, Control, and Computing (Allerton), pages 850-857. IEEE, 2014.
|
| 288 |
+
[38] Naum Zuselevich Shor. Minimization methods for non-differentiable functions, volume 3. Springer Science & Business Media, 2012.
|
| 289 |
+
[39] David Silver, Aja Huang, Chris J Maddison, Arthur Guez, Laurent Sifre, George Van Den Driessche, Julian Schrittwieser, Ioannis Antonoglou, Veda Panneershelvam, Marc Lanctot, et al. Mastering the game of Go with deep neural networks and tree search. Nature, 529(7587):484, 2016.
|
| 290 |
+
[40] Sebastian U Stich. Local sgd converges fast and communicates little. arXiv preprint arXiv:1805.09767, 2018.
|
| 291 |
+
[41] Jianyu Wang and Gauri Joshi. Cooperative sgd: A unified framework for the design and analysis of communication-efficient sgd algorithms. arXiv preprint arXiv:1808.07576, 2018.
|
| 292 |
+
[42] Blake Woodworth, Brian Bullins, Ohad Shamir, and Nathan Srebro. The min-max complexity of distributed stochastic convex optimization with intermittent communication. arXiv preprint arXiv:2102.01583, 2021.
|
| 293 |
+
[43] Blake Woodworth, Kumar Kshitij Patel, and Nathan Srebro. Minibatch vs local sgd for heterogeneous distributed learning. arXiv preprint arXiv:2006.04735, 2020.
|
| 294 |
+
[44] Blake Woodworth, Kumar Kshitij Patel, Sebastian Stich, Zhen Dai, Brian Bullins, Brendan Mcmahan, Ohad Shamir, and Nathan Srebro. Is local sgd better than minibatch sgd? In International Conference on Machine Learning, pages 10334–10343. PMLR, 2020.
|
| 295 |
+
[45] Yang You, Igor Gitman, and Boris Ginsburg. Scaling sgd batch size to 32k for imagenet training. arXiv preprint arXiv:1708.03888, 6:12, 2017.
|
| 296 |
+
[46] Yang You, Jing Li, Sashank Reddi, Jonathan Hseu, Sanjiv Kumar, Srinadh Bhojanapalli, Xiaodan Song, James Demmel, Kurt Keutzer, and Cho-Jui Hsieh. Large batch optimization for deep learning: Training bert in 76 minutes. arXiv preprint arXiv:1904.00962, 2019.
|
| 297 |
+
[47] Hao Yu, Rong Jin, and Sen Yang. On the linear speedup analysis of communication efficient momentum SGD for distributed non-convex optimization. In Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, pages 7184-7193, 2019.
|
| 298 |
+
[48] Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted sgd with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 5693-5700, 2019.
|
| 299 |
+
[49] Honglin Yuan, Manzil Zaheer, and Sashank Reddi. Federated composite optimization. In International Conference on Machine Learning, pages 12253-12266. PMLR, 2021.
|
| 300 |
+
[50] Bohang Zhang, Jikai Jin, Cong Fang, and Liwei Wang. Improved analysis of clipping algorithms for non-convex optimization. arXiv preprint arXiv:2010.02519, 2020.
|
| 301 |
+
[51] Jingzhao Zhang, Tianxing He, Suvrit Sra, and Ali Jadbabaie. Why gradient clipping accelerates training: A theoretical justification for adaptivity. arXiv preprint arXiv:1905.11881, 2019.
|
| 302 |
+
[52] Xinwei Zhang, Xiangyi Chen, Mingyi Hong, Zhiwei Steven Wu, and Jinfeng Yi. Understanding clipping for federated learning: Convergence and client-level differential privacy. arXiv preprint arXiv:2106.13673, 2021.
|
| 303 |
+
[53] Xinwei Zhang, Mingyi Hong, Sairaj Dhople, Wotao Yin, and Yang Liu. Fedpd: A federated learning framework with optimal rates and adaptivity to non-iid data. arXiv preprint arXiv:2005.11418, 2020.
|
| 304 |
+
|
| 305 |
+
[54] Yuchen Zhang, John C Duchi, and Martin J Wainwright. Communication-efficient algorithms for statistical optimization. The Journal of Machine Learning Research, 14(1):3321-3363, 2013.
|
| 306 |
+
[55] Fan Zhou and Guojing Cong. On the convergence properties of a $k$ -step averaging stochastic gradient descent algorithm for nonconvex optimization. arXiv preprint arXiv:1708.01012, 2017.
|
| 307 |
+
[56] Martin Zinkevich, Markus Weimer, Lihong Li, and Alex J Smola. Parallelized stochastic gradient descent. In Advances in neural information processing systems, pages 2595-2603, 2010.
|
| 308 |
+
|
| 309 |
+
# Checklist
|
| 310 |
+
|
| 311 |
+
1. For all authors...
|
| 312 |
+
|
| 313 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 314 |
+
(b) Did you describe the limitations of your work? [Yes] See Section 6
|
| 315 |
+
(c) Did you discuss any potential negative societal impacts of your work? [N/A]
|
| 316 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 317 |
+
|
| 318 |
+
2. If you are including theoretical results...
|
| 319 |
+
|
| 320 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes] See Section 3
|
| 321 |
+
(b) Did you include complete proofs of all theoretical results? [Yes] See the Appendix
|
| 322 |
+
|
| 323 |
+
3. If you ran experiments...
|
| 324 |
+
|
| 325 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] in the supplemental material
|
| 326 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] See Section 5
|
| 327 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [No]
|
| 328 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] See Section 5
|
| 329 |
+
|
| 330 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 331 |
+
|
| 332 |
+
(a) If your work uses existing assets, did you cite the creators? [Yes]
|
| 333 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 334 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [No]
|
| 335 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 336 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 337 |
+
|
| 338 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 339 |
+
|
| 340 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 341 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 342 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:67600f69f173a521a8e40b09e9f71e0fb38d5126ec7259eb216e6cf6bc25ea44
|
| 3 |
+
size 328430
|
acommunicationefficientdistributedgradientclippingalgorithmfortrainingdeepneuralnetworks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:40c4f9d6638169255e6a6f23ed4e01a89d24fd79f580d2c586807e9c59c013e4
|
| 3 |
+
size 554486
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5993f007c7ea115800a8fe839893fdd5fe100f70795ecbe166885f200ad54374
|
| 3 |
+
size 89890
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:884bc1a443f777a3db28e93c94fef23f709aef09b463c81cfdb44d3de0fb98d1
|
| 3 |
+
size 111643
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/44f8a9af-1701-4f03-aea5-43b9bc59c0bc_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f597f6c662c37384f4c2f541b5e4acd7b37f068f904ba4b632e70ed9fe17dc4f
|
| 3 |
+
size 1665002
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/full.md
ADDED
|
@@ -0,0 +1,364 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Comprehensive Study on Large-Scale Graph Training: Benchmarking and Rethinking
|
| 2 |
+
|
| 3 |
+
Keyu Duan<sup>1</sup>, Zirui Liu<sup>2</sup>, Peihao Wang<sup>3</sup>, Wenqing Zheng<sup>3</sup>, Kaixiong Zhou<sup>2</sup>, Tianlong Chen<sup>3</sup>, Xia Hu<sup>2</sup>, Zhangyang Wang<sup>3</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ National University of Singapore, $^{2}$ Rice University, $^{3}$ University of Texas at Austin {k.duan}@u.nus.edu; {z1105,Kaixiong.Zhou,xia.hu}@rice.edu; {peihaowang,w.zheng,tianlong.chen,atlaswang}@utexas.edu
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Large-scale graph training is a notoriously challenging problem for graph neural networks (GNNs). Due to the nature of evolving graph structures into the training process, vanilla GNNs usually fail to scale up, limited by the GPU memory space. Up to now, though numerous scalable GNN architectures have been proposed, we still lack a comprehensive survey and fair benchmark of this reservoir to find the rationale for designing scalable GNNs. To this end, we first systematically formulate the representative methods of large-scale graph training into several branches and further establish a fair and consistent benchmark for them through a greedy hyperparameter search. In addition, regarding efficiency, we theoretically evaluate the time and space complexity of the various branches and empirically compare them w.r.t. GPU memory usage, throughput, and convergence. Furthermore, we analyze the pros and cons of the various branches of scalable GNNs and then present a new ensembling training manner, named EnGCN, to address the existing issues. Remarkably, our proposed method has achieved new state-of-the-art (SOTA) performance on large-scale datasets. Our code is available at https://github.com/VITA-Group/Large_Scale_GCN_Benchmarking.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Graph Neural Networks (GNNs) have shown great prosperity in recent years [1-4] and have dominated a variety of applications, including recommender systems [5-7], social network analysis [8-10], scientific topological structure prediction (e.g. cellular function prediction [11, 12], molecular structure prediction [13, 14], and chemical compound retrieval [15]), and scalable point cloud segmentation [16, 17]. Although the message passing (MP) strategy underlies GNNs' superior performance, the need to involve massive topological structures prevents MP-based GNNs [18-20, 1, 2, 4, 21, 22] from scaling to industrial-grade graph applications. Specifically, as MP requires nodes to aggregate information from their neighbors, the integral graph structure is inevitably preserved during forward and backward propagation, thus occupying considerable memory and time. For example [6], training a GNN-based recommendation system over 7.5 billion items requires three days on a 16-GPU cluster (384 GB memory in total).
|
| 14 |
+
|
| 15 |
+
To facilitate understanding, a unified formulation of MP with $K$ layers is presented as follows:
|
| 16 |
+
|
| 17 |
+
$$
|
| 18 |
+
\mathbf {X} ^ {(K)} = \mathbf {A} ^ {(K - 1)} \sigma \left(\mathbf {A} ^ {(K - 2)} \sigma \left(\dots \sigma \left(\mathbf {A} ^ {(0)} \mathbf {X} ^ {(0)} \mathbf {W} ^ {(0)}\right) \dots\right) \mathbf {W} ^ {(K - 2)}\right) \mathbf {W} ^ {(K - 1)}, \tag {1}
|
| 19 |
+
$$
|
| 20 |
+
|
| 21 |
+
where $\sigma$ is an activation function (e.g. ReLU) and $\mathbf{A}^{(l)}$ is the weighted adjacency matrix at the $l$-th layer. As in Equ. (1), the key bottleneck of vanilla MP lies in $\mathbf{A}^{(l)}\mathbf{X}^{(l)}$. In terms of memory usage, the entire sparse adjacency matrix has to be stored on one GPU. As the number of nodes grows, it becomes quite challenging for a single GPU to afford the message passing over the full graph.
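As a rough illustration of why full-batch MP is memory-hungry, the sketch below (our own, with illustrative shapes and names, using SciPy sparse matrices) materializes $\mathbf{A}\mathbf{X}^{(l)}\mathbf{W}^{(l)}$ for every layer over the whole graph at once, so the adjacency matrix and all intermediate activations live in memory simultaneously.

```python
import numpy as np
import scipy.sparse as sp

def full_batch_mp(A, X, weights):
    """Full-batch message passing as in Equ. (1): X <- sigma(A X W) per layer.
    A is an N x N sparse adjacency matrix, X an N x d feature matrix."""
    H = X
    for l, W in enumerate(weights):
        H = A @ H @ W                      # the A @ H product touches the entire graph
        if l < len(weights) - 1:
            H = np.maximum(H, 0.0)         # ReLU over all N nodes at once
    return H

N, d, h = 1000, 16, 8
A = sp.random(N, N, density=0.01, format="csr")
X = np.random.randn(N, d)
weights = [np.random.randn(d, h), np.random.randn(h, h)]
print(full_batch_mp(A, X, weights).shape)   # (1000, 8)
```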
|
| 22 |
+
|
| 23 |
+
Up to now, massive efforts have been made to mitigate the aforementioned issue and scale up GNNs [23, 24, 3, 25-30]. Most of them focus on approximating the iterative full-batch MP to reduce the memory consumption of training within one single GPU. It is worth noting that we target the algorithmic scope and do not extend to scalable infrastructure topics like distributed training with multiple GPUs [31, 32] and quantization [33]. Briefly, the previous works encompass two branches: Sampling-based and Decoupling-based. Namely, the former methods [3, 25, 24, 23, 34-36] perform batch training that utilizes sampled subgraphs as small batches to approximate the full-batch MP, so that the memory consumption is considerably reduced. The latter follows the principle of performing propagation $(\mathbf{A}^{(l)}\mathbf{X}^{(l)})$ and prediction $(\mathbf{X}^{(l)}\mathbf{W}^{(l)})$ separately, either precomputing the propagation [27, 28, 19, 37, 31] or post-processing with label propagation [29, 38]. Despite the prosperity of scalable GNNs, some issues remain under-explored: we lack a systematic study of this reservoir from the perspective of effectiveness and efficiency, without which it is impossible to identify the rationale behind the design philosophy for large-scale graph learning in practice.
|
| 24 |
+
|
| 25 |
+
To this end, we first establish a consistent benchmark and provide a systematic study of large-scale graph training for both Sampling-based methods (Sec. 2.1) and Decoupling-based methods (Sec. 2.2). For each branch, we conduct a thorough investigation of the design strategy and implementation details of typical methods. Then, we carefully examine the sensitive hyperparameters and unify them in one "sweet spot" set via a linear greedy hyperparameter (HP) search (Sec. 3), i.e., iteratively searching the optimal value of each HP while fixing the others. For all concerned methods, the performance comparison is conducted on representative datasets of different scales, varying from about 80,000 nodes to 2,400,000, including Flickr [24], Reddit [3], and ogbn-products [11]. This step is a crucial precondition, as configuration inconsistency significantly prohibits a fair comparison as well as the subsequent analysis. Besides, regarding efficiency, we theoretically present the time and space complexities of the various branches, and empirically evaluate them on GPU memory usage, throughput, and convergence (Sec. 4). In addition to the benchmark, we further present a new ensembling training manner, EnGCN (Sec. 5), to address the existing issues identified in our benchmark analysis (Sec. 5.1). Notably, by organically integrating with self-label-enhancement (SLE) [29], EnGCN achieves new state-of-the-art (SOTA) results on multiple large-scale datasets.
|
| 26 |
+
|
| 27 |
+
# 2 Formulations For Large-scale Graph Training Paradigms
|
| 28 |
+
|
| 29 |
+
# 2.1 Sampling-based Methods
|
| 30 |
+
|
| 31 |
+
Given the formulation of Equ. (1), the sampling-based paradigm seeks the optimal way to perform batch training, such that each batch meets the memory constraint of a single GPU for message passing. For completeness, we restate the unified formulation of sampling-based methods as follows:
|
| 32 |
+
|
| 33 |
+
$$
|
| 34 |
+
\mathbf {X} _ {\mathcal {B} _ {0}} ^ {(k)} = \widetilde {\mathbf {A}} _ {\mathcal {B} _ {1}} ^ {(k - 1)} \sigma \left(\widetilde {\mathbf {A}} _ {\mathcal {B} _ {2}} ^ {(k - 2)} \sigma \left(\dots \sigma \left(\widetilde {\mathbf {A}} _ {\mathcal {B} _ {K}} ^ {(0)} \mathbf {X} _ {\mathcal {B} _ {K}} ^ {(0)} \mathbf {W} ^ {(0)}\right) \dots\right) \mathbf {W} ^ {(K - 2)}\right) \mathbf {W} ^ {(K - 1)}, \tag {2}
|
| 35 |
+
$$
|
| 36 |
+
|
| 37 |
+
where $\mathcal{B}_l$ is the set of sampled nodes for the $l$-th layer, and $\widetilde{\mathbf{A}}^{(l)}$ is the adjacency matrix for the $l$-th layer sampled from the full graph. Given the local view of GNNs, i.e., one node's representation is only related to its neighbors, a straightforward way for unbiased batch training would be $\mathcal{B}_{l+1} = \mathcal{N}(\mathcal{B}_l)$, where $\mathcal{N}$ denotes the set of neighbors. $\mathcal{B}_0$ is randomly sampled according to the uniform distribution. Notably, this batch training style can achieve SOTA performance but suffers from the "neighbor explosion" problem, where the time consumption and memory usage grow exponentially with the GNN depth, causing significant memory and time overhead. To mitigate this, a number of sampling-based methods have been proposed. The key difference among them is how $\{\mathcal{B}_0, \dots, \mathcal{B}_{K-1}, \mathcal{B}_K\}$ are sampled. Given a large-scale graph $\mathcal{G} = (\mathcal{V}, \mathcal{E})$, there are three categories of widely-used sampling strategies:
|
| 38 |
+
|
| 39 |
+
Node-wise Sampling [3]. $\mathcal{B}_{l + 1} = \bigcup_{v\in \mathcal{B}_l}\{u\mid u\sim Q\cdot \mathbb{P}_{\mathcal{N}(v)}\}$, where $\mathbb{P}$ is a sampling distribution; $\mathcal{N}(v)$ is the sampling space, i.e., the 1-hop neighbors of $v$; and $Q$ denotes the number of samples. The representative node-wise sampling method is:
|
| 40 |
+
|
| 41 |
+
$\star$ GraphSAGE [3]: In GraphSAGE, $\mathbb{P}$ is the uniform distribution.
|
| 42 |
+
|
| 43 |
+
Compared with the aforementioned naive batch training, the node-wise sampling [3] alleviates the "neighbor explosion" problem by fixing the number of sampled neighbors $Q$ for each node. It thus reduces the space complexity from $D^K$ to $Q^K$, where $D$ is the averaged node degree. However, as $Q$ is not far less than $D$ in order of magnitude, such mitigation is moderate, which is validated by our empirical results in Sec. 3 and Sec. 4.
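A minimal sketch of GraphSAGE-style node-wise sampling with uniform $\mathbb{P}$ and a fixed fan-out $Q$ is given below; the adjacency-list input format and helper names are our own assumptions, not the official implementation.

```python
import random

def node_wise_sampling(adj, batch_nodes, num_layers, Q, seed=0):
    """GraphSAGE-style node-wise sampling: for each layer, every node in the current
    set draws Q neighbors uniformly at random (with replacement), so the sampled
    set can grow roughly by a factor of Q per layer."""
    random.seed(seed)
    layers = [set(batch_nodes)]                 # B_0: the target nodes of the batch
    for _ in range(num_layers):
        prev = layers[-1]
        nxt = set()
        for v in prev:
            neighbors = adj.get(v, [])
            if neighbors:
                nxt.update(random.choices(neighbors, k=Q))
        layers.append(prev | nxt)               # keep v itself so its representation can be updated
    return layers                                # layers[l]: nodes needed at depth l

adj = {0: [1, 2], 1: [0, 3], 2: [0, 3], 3: [1, 2, 4], 4: [3]}
print([sorted(s) for s in node_wise_sampling(adj, [0], num_layers=2, Q=2)])
```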
|
| 44 |
+
|
| 45 |
+
Layer-wise Sampling [25, 26]. $\mathcal{B}_{l + 1} = \{u\mid u\sim Q\cdot \mathbb{P}_{\mathcal{N}(\mathcal{B}_l)}\}$ , where $\mathcal{N}(\mathcal{B}_l) = \bigcup_{v\in \mathcal{B}_l}\mathcal{N}(v)$ denotes the union of 1-hop neighbors of all nodes in $\mathcal{B}_l$ . We introduce a couple of layer-wise sampling methods as follows.
|
| 46 |
+
|
| 47 |
+
$\star$ FastGCN [25]: The sampling distribution $\mathbb{P}$ is designed regarding the node degree, where the probability for node $u$ of being sampled is $p(u)\propto ||\hat{\mathbf{A}} (u,:)||^2$
|
| 48 |
+
$\star$ LADIES [26]: More recently, based on FastGCN, Zou et al. [26] propose LADIES, which extends the sampling space from $\mathcal{N}(\mathcal{B}_l)$ to $\mathcal{N}(\mathcal{B}_l) \cup \mathcal{B}_l$ by adding self-loops.
|
| 49 |
+
|
| 50 |
+
Notably, compared with the node-wise sampling, the layer-wise sampling essentially solves the "neighbor explosion" problem by fixing the number of overall sampled nodes in a layer to $Q$. However, the layer-wise induced adjacency matrix is usually sparser than the others, which accounts for its sub-optimal performance in practice.
|
| 51 |
+
|
| 52 |
+
Subgraph-wise Sampling [23, 24]. $\mathcal{B}_K = \mathcal{B}_{K - 1} = \dots = \mathcal{B}_0 = \{u\mid u\sim Q\cdot \mathbb{P}_{\mathcal{G}}\}$ . In the subgraph-wise sampling, all layers share the same subgraph induced from the entire graph $\mathcal{G}$ based on a specific sampling strategy $\mathbb{P}_{\mathcal{G}}$ , such that the sampled nodes are confined in the subgraph. Typically, this sampling strategy has two representative works:
|
| 53 |
+
|
| 54 |
+
$\star$ ClusterGCN [23]: ClusterGCN first partitions the entire graph into clusters based on graph partition algorithms, e.g. METIS [39], and then selects several clusters to form a batch.
|
| 55 |
+
$\star$ GraphSAINT [24]: GraphSAINT samples a subset of nodes based on sampling strategy $\mathbb{P}_{\mathcal{G}}$ and then induces the corresponding subgraph as a batch. The commonly-used sampling strategies include: (i) node sampler: $\mathbb{P}(u) = ||\widetilde{\mathbf{A}}_{:,u}||^2$ , (ii) edge sampler: $\mathbb{P}(u,v) = \frac{1}{deg(u)} + \frac{1}{deg(v)}$ , and (iii) random walk sampler. They are illustrated in Appendix A1.1.
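As a concrete example of strategy (i), the following sketch (our own illustration with toy shapes, not the official GraphSAINT code) draws nodes with probability proportional to $||\widetilde{\mathbf{A}}_{:,u}||^2$ and induces the corresponding subgraph as one training batch.

```python
import numpy as np
import scipy.sparse as sp

def saint_node_sampler(A_norm, budget, rng):
    """GraphSAINT node sampler: P(u) is proportional to ||A_norm[:, u]||^2; the sampled
    nodes induce the subgraph used as one training batch."""
    col_norms = np.asarray(A_norm.power(2).sum(axis=0)).ravel()   # ||A[:, u]||^2 per node
    probs = col_norms / col_norms.sum()
    nodes = rng.choice(A_norm.shape[0], size=budget, replace=False, p=probs)
    sub_adj = A_norm[nodes][:, nodes]                              # induced subgraph adjacency
    return np.sort(nodes), sub_adj

rng = np.random.default_rng(0)
A = sp.random(100, 100, density=0.05, format="csr")
nodes, sub = saint_node_sampler(A, budget=10, rng=rng)
print(nodes, sub.shape)
```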
|
| 56 |
+
|
| 57 |
+
# 2.2 Decoupling-based Methods
|
| 58 |
+
|
| 59 |
+
Training GNNs with full-batch message passing at each epoch is not feasible. In this section, we summarize another line of scalable GNNs which decouple the message passing from GPU training and offload it to CPUs. Specifically, the message passing is conducted only once on CPUs, which have access to large memory. Depending on the processing order, there are two typical ways to decouple these two operations: (i) pre-processing and (ii) post-processing.
|
| 60 |
+
|
| 61 |
+
Pre-processing: MP precomputing [27-29]. Recalling Equ. (1), without loss of generality, we assume that $\mathbf{A}^{(k - 1)} = \mathbf{A}^{(k - 2)} = \dots = \mathbf{A}^{(0)} = \mathbf{A}$, i.e., the topological structure of the entire graph remains the same during forward propagation, which covers most cases. To decouple the two operations, message passing (AX) and feature transformation (XW), we can first pre-compute the propagated node representations and then train a neural network for the downstream task based on these fused representations:
|
| 62 |
+
|
| 63 |
+
$$
|
| 64 |
+
\underbrace {\mathbf {X} ^ {l} = \mathbf {A} ^ {l} \mathbf {X}} _ {\text {precomputing}}, \quad \underbrace {\bar {\mathbf {X}} = \rho \left(\mathbf {X} , \mathbf {X} ^ {1} , \cdots , \mathbf {X} ^ {K}\right) , \quad \mathbf {Y} = f _ {\theta} (\bar {\mathbf {X}})} _ {\text {end-to-end training on a GPU}}, \tag {3}
|
| 65 |
+
$$
|
| 66 |
+
|
| 67 |
+
where $\mathbf{X}^l$ can be regarded as the node representation aggregating $l$ -hop neighborhood information; $K$ is the largest propagation hop; $\rho(\cdot)$ is a function that combines the aggregated features from different hops; and $f_{\theta}(\cdot)$ is a feature mapping function parameterized by $\theta$ . We summarize three existing pre-computing schemes as follows.
|
| 68 |
+
|
| 69 |
+
$\star$ SGC [27]: SGC leverages the node representations aggregated over $K$ hops and feeds the resultant features to a fully-connected layer. We can formulate this scheme by letting $\rho (\cdot)$ select the last element $\mathbf{X}^K$ and $f_{\theta}(\cdot)$ be a linear layer with readout activation: $\mathbf{Y} = \sigma (\mathbf{X}^{K}\Theta)$.
|
| 70 |
+
$\star$ SIGN [28]: SIGN concatenates features from different hops and then fuses them into the final node representation via a linear layer. To be more specific, $\rho (\cdot)$ is defined as $\bar{\mathbf{X}} = \left[\mathbf{X}\quad \mathbf{X}^{1}\quad \dots \quad \mathbf{X}^{K}\right]\Omega$ where $\Omega$ is a transformation matrix, and $f_{\theta}(\cdot)$ is defined as a linear readout layer $\mathbf{Y} = \sigma (\bar{\mathbf{X}}\Theta)$.
|
| 71 |
+
$\star$ SAGN [29]: SAGN adopts an attention mechanism to combine feature representations from $K$ hops: $\bar{\mathbf{X}} = \sum_{l=1}^{K} \mathbf{T}^l \mathbf{X}^l$, where $\mathbf{T}^l$ is a diagonal matrix whose diagonal corresponds to each node's attention weight for the $l$-hop information. The attention weight for the $i$-th node is calculated by $T_i^l = \text{softmax}_K(\text{LeakyReLU}(\boldsymbol{u}^T \mathbf{X}_i + \boldsymbol{v}^T \mathbf{X}_i^l))$, where the subscripts slice the data matrices along the row. The feature mapping function is implemented by an MLP block with a skip connection to the initial features: $\mathbf{Y} = \text{MLP}_\theta(\bar{\mathbf{X}} + \mathbf{X} \Theta_r)$.
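The precomputing step in Equ. (3) amounts to $K$ sparse-dense matrix products carried out once on the CPU; the SIGN-style sketch below (illustrative sizes and names, not the official implementation) shows how the multi-hop features can be built and concatenated before GPU training reduces to mini-batch MLP training.

```python
import numpy as np
import scipy.sparse as sp

def precompute_hops(A, X, K):
    """Precompute [X, AX, A^2 X, ..., A^K X] on the CPU (Equ. (3)).
    Each hop reuses the previous product, so the cost is K sparse-dense matmuls."""
    hops = [X]
    H = X
    for _ in range(K):
        H = A @ H                        # one more hop of propagation
        hops.append(H)
    return np.concatenate(hops, axis=1)  # SIGN-style concatenation of all hops

N, d, K = 500, 32, 3
A = sp.random(N, N, density=0.02, format="csr")
X = np.random.randn(N, d).astype(np.float32)
features = precompute_hops(A, X, K)
print(features.shape)                    # (500, 128): (K + 1) * d fused features per node
```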
|
| 72 |
+
|
| 73 |
+
Post-processing: Label Propagation. The label propagation algorithms [40-44, 38, 45] diffuse labels in the graph and make predictions based on the diffused labels. It is a classical family of graph algorithms for transductive learning, where the nodes for testing are used in the training procedure. The label propagation can be written in a unified form as follows:
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\mathbf {Y} ^ {(l)} = \alpha \mathbf {A} \mathbf {Y} ^ {(l - 1)} + (1 - \alpha) \mathbf {G}. \tag {4}
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
The diffusion procedure iterates the formula above over $l$ multiple times to guarantee convergence. It requires two sets of inputs: $(i)$ the stack of the label embeddings of all nodes, denoted as $\mathbf{Y}^{(0)}\in \mathbb{R}^{N\times c}$, where $c$ is the number of classes. In our implementation, $\mathbf{Y}^{(0)}$ is the output of a trained MLP model [38]. $(ii)$ the diffusion embeddings, denoted as $\mathbf{G}\in \mathbb{R}^{N\times c}$, which are propagated across the edges of the graph. Depending on how the diffusion embeddings of unlabeled nodes are computed, two types of $\mathbf{G}$ are summarized as follows:
|
| 80 |
+
|
| 81 |
+
$\star$ Zeros [40]: $\mathbf{G}_{i,:} = \left\{ \begin{array}{ll}\hat{\mathbf{Y}}_{i,:} - \alpha \mathbf{A}\mathbf{Y}_{i,:}^{(k)}, & i\in \mathcal{T}_{train}\\ \mathbf{0}, & otherwise \end{array} \right.$ where $\mathcal{T}_{train}$ denotes the training set and $\hat{\mathbf{Y}}$ is the stack of true labels. For zeros, $\mathbf{Y}^{(0)} = \mathbf{G}$ .
|
| 82 |
+
|
| 83 |
+
$\star$ Residual [38]: $\mathbf{G}_{i,:} = \left\{ \begin{array}{ll}\hat{\mathbf{Y}}_i, & v_i\in \mathcal{T}_{train}\\ \hat{\mathbf{Z}}_i, & otherwise \end{array} \right.$, where $\hat{\mathbf{Z}} = \mathbf{Z} + \hat{\mathbf{E}}$. $\mathbf{Z}$ is the prediction of a trained simple neural network, e.g. an MLP, and $\hat{\mathbf{E}}$ is a residual error matrix, which is optimized iteratively by $\mathbf{E}^{(t + 1)} = (1 - \alpha)\mathbf{E} + \alpha \mathbf{A}\mathbf{E}^{(t)}$, where $\mathbf{E} = \mathbf{Z} - \hat{\mathbf{Y}}$ and $\mathbf{E}^{(0)} = \mathbf{E}$.
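A minimal sketch of iterating Equ. (4) is given below, assuming $\mathbf{A}$ is already normalized and $\mathbf{G}$ is built by either variant above; the toy normalization and sizes are our own illustrative choices.

```python
import numpy as np
import scipy.sparse as sp

def label_propagation(A, G, alpha=0.75, num_iters=20):
    """Iterate Y^(l) = alpha * A Y^(l-1) + (1 - alpha) * G (Equ. (4)), starting from Y^(0) = G."""
    Y = G.copy()
    for _ in range(num_iters):
        Y = alpha * (A @ Y) + (1.0 - alpha) * G
    return Y

N, c = 200, 5
A = sp.random(N, N, density=0.05, format="csr")
A = A.multiply(1.0 / np.maximum(A.sum(axis=1), 1e-12))   # crude row normalization for this toy example
G = np.zeros((N, c))
G[np.arange(20), np.random.randint(0, c, 20)] = 1.0       # one-hot labels on 20 "training" nodes
print(label_propagation(A, G).shape)                       # (200, 5) diffused label embeddings
```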
|
| 84 |
+
|
| 85 |
+
# 2.3 More Related Works
|
| 86 |
+
|
| 87 |
+
Model-agnostic Tricks. Besides the training methods as introduced above, there are some model-agnostic tricks that have been empirically confirmed to be effective for boosting large-scale graph training. Although those add-ons cannot be included into our benchmarking analysis, it is of equal importance to introduce them for completeness. Here we briefly introduce two representative ones:
|
| 88 |
+
|
| 89 |
+
$\star$ Self-Label-Enhanced (SLE) [29]: SLE includes two individual tricks, self-training and label augmentation. Here we use $\mathcal{T}$ to denote the training set. For self-training, the unlabeled nodes with high confidence (larger than a pre-defined threshold) are added to $\mathcal{T}$ after a certain number of training epochs. For label augmentation, it trains an additional model $\Phi (\cdot)$, whose forward propagation can be formulated as $out = \Phi (\hat{\mathbf{A}}^k\mathbf{Y}_\mathcal{T})$. $out$ is added to the output of the main model to make the final prediction.
|
| 90 |
+
|
| 91 |
+
$\star$ GIANT [46]: In general, node features are usually pre-embedded with graph-agnostic language models, such as word2vec [47] and BERT [48]. Recently, Chien et al. proposed a graph-related node feature extraction framework (GIANT), which embeds the raw texts into numerical features by taking advantage of graph structures, to help boost the performance of GNNs on downstream tasks.
|
| 92 |
+
|
| 93 |
+
Memory-based GNN Training. Focusing on mitigating the "neighbor explosion" problem of full-batch training introduced above, memory-based GNNs [49, 50] try to save GPU memory with different techniques while including all neighbor nodes in the computation during message passing. GAS [49] incorporates historical embeddings [34] to provably maintain the expressive power of full-batch GNNs. VQ-GNN [50] utilizes vector quantization to scale convolution-based GNNs and resembles the performance of full-batch message passing by learning an additional quantized feature matrix and a corresponding low-rank adjacency matrix.
|
| 94 |
+
|
| 95 |
+
# 3 Benchmarking Over Effectiveness
|
| 96 |
+
|
| 97 |
+
# 3.1 Implementation Details
|
| 98 |
+
|
| 99 |
+
We test numerous large-scale graph training methods with a greedy hyperparameter (HP) search to find their sweet spots and best performance for a fair comparison. The search space is defined in Table 1. The access and statistics of all used datasets are introduced in Appendix A3.1. Particularly, for label propagation, we select two representative algorithms: Huang et al. [38], the residual diffusion type, and Zhu et al. [40], the zeros type. The number of propagations is the maximum number of iterations $k$, the aggregation ratio is $\alpha$ as in Equ. (4), and the number of MLP layers refers to the MLP that precedes the label propagation module, following Huang et al. [38].
|
| 100 |
+
|
| 101 |
+
Limited by space, we select five representative approaches that cover all branches introduced above, including GraphSAGE [3], LADIES [26], ClusterGCN [23], SAGN [29], and C&S [38]. We illustrate the selected results in Fig. 1 and the results of other methods in Fig. A5. For each subplot, from left to right, each column denotes the search results for one HP. Once an HP has been searched, its value is fixed to the best result for the remaining HP searches. Iteratively, we obtain the best performance in the last column. For convenience and clarity, we list the
|
| 102 |
+
|
| 103 |
+
searched optimal hyperparameter settings of all test methods in Table A5.
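The linear greedy search can be summarized by the sketch below, where `train_and_eval` is a placeholder for training a model under a configuration and returning its validation accuracy; the candidate sets mirror Table 1 and the toy objective is purely illustrative.

```python
def greedy_hp_search(search_space, defaults, train_and_eval):
    """Linear greedy search: sweep one hyperparameter at a time (others fixed),
    keep the best value, then move to the next hyperparameter."""
    best = dict(defaults)
    for name, candidates in search_space.items():
        scores = {}
        for value in candidates:
            config = {**best, name: value}
            scores[value] = train_and_eval(config)    # e.g. validation accuracy
        best[name] = max(scores, key=scores.get)       # freeze the winner for later sweeps
    return best

# Toy example with a fake objective standing in for actual GNN training.
space = {"lr": [1e-2, 1e-3, 1e-4], "dropout": [0.1, 0.2, 0.5], "hidden": [128, 256, 512]}
defaults = {"lr": 1e-2, "dropout": 0.2, "hidden": 128}
fake_eval = lambda cfg: -abs(cfg["lr"] - 1e-3) - abs(cfg["dropout"] - 0.5) + cfg["hidden"] / 1024
print(greedy_hp_search(space, defaults, fake_eval))
```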
|
| 104 |
+
|
| 105 |
+
Table 1: The search space of hyperparameters for benchmarked methods.
|
| 106 |
+
|
| 107 |
+
<table><tr><td>Category</td><td>Hyperparameter (Abbr.)</td><td>Candidates</td></tr><tr><td rowspan="7">Sampling & Precomputing</td><td>Learning rate (LR)</td><td>{1e-2*, 1e-3, 1e-4}</td></tr><tr><td>Weight Decay (WD)</td><td>{1e-4*, 2e-4, 4e-4}</td></tr><tr><td>Dropout Rate (DP)</td><td>{0.1, 0.2*, 0.5, 0.7}</td></tr><tr><td>Training Epochs (#E)</td><td>{20, 30, 40, 50*}</td></tr><tr><td>Hidden Dimension (HD)</td><td>{128*, 256, 512}</td></tr><tr><td># Layers (#L)</td><td>{2*, 4, 6}</td></tr><tr><td>Batch size<sup>a</sup> (BS)</td><td>{1000*, 2000, 5000}</td></tr><tr><td rowspan="6">LP</td><td>Diffusion Type (DT)</td><td>{residual*, zeros}</td></tr><tr><td># Propagations (#Prop)</td><td>{2, 20*, 50}</td></tr><tr><td>Aggregation Ratio (AR)</td><td>{0.5, 0.75*, 0.9, 0.99}</td></tr><tr><td>Adj. Norm (Adj.)</td><td>{D<sup>-1</sup>A, AD<sup>-1</sup>, D<sup>-1/2</sup>AD<sup>-1/2</sup>*}</td></tr><tr><td>Auto Scale (AS)</td><td>{True*, False}</td></tr><tr><td># MLP Layers (#ML)</td><td>{2*, 3, 4}</td></tr></table>
|
| 108 |
+
|
| 109 |
+
* marks the default value
|
| 110 |
+
${}^{a}$ We do not search batch size for precomputing-based methods since
|
| 111 |
+
they do not follow a sample-training style.
|
| 112 |
+
|
| 113 |
+
# 3.2 Experimental Observations
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
Figure 1: The greedy hyperparameter searching results for selected representative methods. The x-axis denotes the searched HPs, where the abbreviations are consistent with Table 1.
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
|
| 126 |
+
Obs. 1. Sampling-based methods are more sensitive to the hyperparameters related to MP. According to Fig. 1, in comparison with precomputing, all sampling-based methods are insensitive to hyperparameters (HPs) related to the feature transformation matrices, including weight decay, dropout, and hidden dimension, but particularly sensitive to the MP-related HPs, including the number of layers and batch size. For model depth, sampling-based methods generally achieve their sweet spots when the number of layers is kept shallow, and suffer from the oversmoothing problem [51-55] as the GNN models go deeper. However, this issue is moderately mitigated in decoupling-based methods, as the model depth does not align with the number of MP hops.
|
| 127 |
+
|
| 128 |
+
Obs. 2. Sampling-based methods' performance is nearly positively correlated with the training batch size. According to the results in the last column for all sampling-based methods, the performance of the layer-wise and subgraph-wise sampling methods is roughly proportional to the batch size. Expectedly, the model performance could further increase as the batch size grows, up to the upper bound of full-batch training, because more links are preserved. Particularly, in our experiment, we set the number of sampled neighbors of node-wise sampling to a large threshold such that the performance of GraphSAGE can be regarded as that of full-batch training. It can easily be seen that the performance of the other sampling-based methods is inferior to full-batch training (GraphSAGE), further supporting our conjecture that the links dropped by sampling are non-trivial.
|
| 129 |
+
|
| 130 |
+
Obs. 3. Precomputing-based methods generally perform better on larger datasets. As shown in Fig. 1 and Fig. A5, C&S (label propagation) outperforms full-batch training (GraphSAGE, as introduced in Obs. 2) on the largest dataset ogbn-products by a large margin of $4.5\%$, although the two branches have on-par performance on smaller datasets. Remarkably, our searched results for GraphSAGE and LP on ogbn-products also reach better performance compared with those on the OGB leaderboard<sup>1</sup>. Noting that GraphSAGE encounters out-of-memory $(\mathrm{OOM})^2$ runtime
|
| 131 |
+
|
| 132 |
+
errors with increasing depth, this observation partially indicates that, limited by model depth and the neighbor explosion problem, it may not be powerful enough to learn expressive representations on extremely large-scale graphs.
|
| 133 |
+
|
| 134 |
+
# 4 Benchmarking Over Efficiency
|
| 135 |
+
|
| 136 |
+
# 4.1 Time And Space Complexity
|
| 137 |
+
|
| 138 |
+
In this section, we present another benchmark regarding the efficiency of scalable graph training methods. First, we briefly summarize a general complexity analysis in Table 2. For sampling-based methods, we note that the time complexity is for training GNNs by iterating over the whole graph. The time complexity $\mathcal{O}(L||\mathbf{A}||_0D + LND^2)$ consists of two parts. The first part, $L||\mathbf{A}||_0D$, comes from the sparse-dense matrix multiplication, i.e., $\mathbf{AX}$. The second part, $LND^2$, comes from the ordinary dense-dense matrix multiplication, i.e., $(\mathbf{AX})\mathbf{W}$. Regarding the space complexity, we need to store the activations of each layer in memory, which has an $\mathcal{O}(bLD)$ space complexity. Note that we ignore the memory usage of model weights and the optimizer here since they are negligible compared to the activations. For decoupling-based methods, the training paradigm simplifies to training MLPs, and thus the complexity is the same as traditional mini-batch training. We do not include label propagation in our analysis since it can be run entirely on CPUs.
|
| 139 |
+
|
| 140 |
+
# 4.2 Throughput And Memory Usage
|
| 141 |
+
|
| 142 |
+
Implementation Details. To fairly benchmark the training speed and memory usage of large-scale graph training methods, we empirically evaluate the throughput and actual memory usage of various methods during the training procedure. Here "throughput" measures how many training steps can be completed within a second. Note that we omit the label propagation methods since they are not
|
| 143 |
+
|
| 144 |
+
Table 2: The time and space complexity for training GNNs with sampling-based and decoupling-based methods, where $b$ is the averaged number of nodes in the sampled subgraph and $r$ is the averaged number of neighbors of each node. Here we do not consider the complexity of pre-processing since it can be done in CPUs.
|
| 145 |
+
|
| 146 |
+
<table><tr><td>Category</td><td>Time Complexity</td><td>Space Complexity</td></tr><tr><td>Node-wise Sampling [3]</td><td>O(r<sup>L</sup>ND<sup>2</sup>)</td><td>O(br<sup>L</sup>D)</td></tr><tr><td>Layer-wise Sampling [28, 26]</td><td>O(rLND<sup>2</sup>)</td><td>O(brLD)</td></tr><tr><td>Subgraph-wise Sampling [23, 24]</td><td>O(L||A||<sub>0</sub>D + LND<sup>2</sup>)</td><td>O(bLD)</td></tr><tr><td>Precomputing [27-29]</td><td>O(LND<sup>2</sup>)</td><td>O(bLD)</td></tr></table>
|
| 147 |
+
|
| 148 |
+
trained by backward propagation. We provide our implementation details for computing the throughput and memory usage in Section A3.2. We report the hardware throughput and activation memory usage in Table 3. We summarize three main observations.
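As a rough sketch of how such throughput and peak-activation numbers can be collected (the exact protocol is described in Appendix A3.2; this PyTorch helper is our own assumption, not the benchmark code), one can time training steps after a warmup and query the peak allocated GPU memory:

```python
import time
import torch

def measure_throughput(model, batches, device="cuda", warmup=5, iters=50):
    """Measure training steps per second and peak GPU memory; `batches` is a list of
    (inputs, labels) tensors and must contain more than `warmup` entries."""
    model = model.to(device)
    opt = torch.optim.SGD(model.parameters(), lr=0.01)
    loss_fn = torch.nn.CrossEntropyLoss()
    torch.cuda.reset_peak_memory_stats(device)
    start = None
    for i, (x, y) in enumerate(batches[: warmup + iters]):
        x, y = x.to(device), y.to(device)
        opt.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        opt.step()
        if i + 1 == warmup:               # start timing only after warmup steps
            torch.cuda.synchronize(device)
            start = time.time()
    torch.cuda.synchronize(device)
    steps = min(len(batches), warmup + iters) - warmup
    throughput = steps / (time.time() - start)
    peak_mb = torch.cuda.max_memory_allocated(device) / 2**20
    return throughput, peak_mb
```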
|
| 149 |
+
|
| 150 |
+
Table 3: The memory usage of activations and the hardware throughput (higher is better). The hardware here is an RTX 3090 GPU.
|
| 151 |
+
|
| 152 |
+
<table><tr><td></td><td colspan="2">Flickr</td><td colspan="2">Reddit</td><td colspan="2">ogbn-products</td></tr><tr><td></td><td>Act Mem. (MB)</td><td>Throughput (iteration/s)</td><td>Act Mem. (MB)</td><td>Throughput (iteration/s)</td><td>Act Mem. (MB)</td><td>Throughput (iteration/s)</td></tr><tr><td>GraphSAGE</td><td>230.63</td><td>65.96</td><td>687.21</td><td>27.62</td><td>415.94</td><td>37.69</td></tr><tr><td>ClusterGCN</td><td>18.45</td><td>171.46</td><td>20.84</td><td>79.91</td><td>10.62</td><td>156.01</td></tr><tr><td>GraphSAINT</td><td>16.51</td><td>151.77</td><td>21.25</td><td>70.68</td><td>10.95</td><td>143.51</td></tr><tr><td>FastGCN</td><td>19.77</td><td>226.93</td><td>22.53</td><td>87.94</td><td>11.54</td><td>93.05</td></tr><tr><td>LADIES</td><td>33.26</td><td>195.34</td><td>43.21</td><td>116.46</td><td>20.33</td><td>93.47</td></tr><tr><td>SGC</td><td>0.01</td><td>115.02</td><td>0.02</td><td>89.91</td><td>0.01</td><td>267.31</td></tr><tr><td>SIGN</td><td>16.99</td><td>96.20</td><td>16.38</td><td>75.33</td><td>16.21</td><td>208.52</td></tr><tr><td>SAGN</td><td>72.94</td><td>55.28</td><td>72.37</td><td>43.45</td><td>71.81</td><td>80.04</td></tr></table>
|
| 153 |
+
|
| 154 |
+
Obs. 4. GraphSAGE is significantly slower and occupies more memory than the other baselines. This is partially because of the large neighbor sampling threshold we set, and inherently owing to its neighbor explosion: to compute the loss for a single node, it recursively requires the neighbors' embeddings from the preceding layers. Please refer to Sec. 2.1 for details.
|
| 155 |
+
|
| 156 |
+
Obs. 5. SGC does not occupy any activation memory. As shown in Table 3, SGC only occupies about $0.01\mathrm{MB}$ of memory during training. This is because SGC has only one linear layer and its activation is exactly the input feature matrix, which is already stored in memory and is thus not counted towards the activation memory.
|
| 157 |
+
|
| 158 |
+
Obs. 6. In general, the speed of decoupling-based methods is comparable to sampling-based methods. Besides the size of the sparse adjacency matrix, the feature size is also crucial for
|
| 159 |
+
|
| 160 |
+
memory consumption. Although precomputing-based methods avoid storing the graph structure on the GPU, they may take advantage of multi-hop features, in which case the corresponding feature memory is multiplied many times.
|
| 161 |
+
|
| 162 |
+
# 4.3 Convergence Analysis
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
|
| 170 |
+

|
| 171 |
+
Figure 2: The empirical results of convergence for sampling-based methods (real line) and precomputing-based methods (dash line).
|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
|
| 175 |
+

|
| 176 |
+
|
| 177 |
+
For convergence analysis, we test all benchmarked methods on Flickr, Reddit, and ogbn-products. The training loss and validation accuracy (val_acc) are shown in Fig. 2. Based on the empirical results, we summarize the main observation as follows:
|
| 178 |
+
|
| 179 |
+
Obs. 7. In general, precomputing-based methods have faster and more stable convergence than sampling-based methods. This is because sampling-based methods usually incur variance among batches, which leads to unstable and slow convergence [35]. Precomputing-based methods mitigate this by moving message passing out of the training loop and into the precomputing stage.
|
| 180 |
+
|
| 181 |
+
# 5 EnGCN: Rethinking Graph Convolutional Networks With Ensembling
|
| 182 |
+
|
| 183 |
+
# 5.1 An Empirical Summary: Pros And Cons
|
| 184 |
+
|
| 185 |
+
Based on our benchmark results in section 3 and 4, we summarize the advantages (marked as Pros) and constraints (marked as Cons) for different branches as follows. Besides the summary, we also provide a joint comparison of effectiveness and efficiency for various methods in Appendix A2.2.
|
| 186 |
+
|
| 187 |
+
$\star$ Sampling-based: (Pros) Sampling subgraphs for GPU training allows these methods to take advantage of numerous graph convolution layers, such as GCN [1], GraphSAGE [3], and GIN [4], which makes it flexible to design specific architectures for different downstream tasks, e.g., node classification and graph classification. (Cons) As mentioned above, sampling-based methods suffer from link sparsity [24] (section 3) and unstable, slow convergence (section 4), both of which prevent them from achieving SOTA performance.
|
| 188 |
+
|
| 189 |
+
$\star$ Precomputing-based: (Pros) Decoupling message passing from GPU training into CPU precomputing allows precomputing-based methods to utilize a mixture of feature transformation units (e.g., attention mechanisms and MLPs) trained in a well-studied manner. This guarantees stable and fast convergence (section 4). In particular, when integrated with add-ons such as SLE [29] and GIANT [46], precomputing-based methods achieve SOTA performance on the large-scale open graph benchmark (ogb) [11] datasets. (Cons) In general, precomputing-based methods occupy at least $\mathcal{O}(LNd)$ CPU memory, where $L$ is the number of layers, $N$ is the number of nodes, and $d$ is the dimension of input features. This is $L$ times as large as for the other branches, which is not affordable for extremely large-scale graphs. For example, the largest ogb dataset, ogbn-papers100M, contains about 111 million nodes and requires approximately 57 Gigabytes (GB) to store the initial feature matrix alone, given float data and 128-dimensional features. As the number of layers increases, the required CPU memory grows proportionally to an unaffordable size.
|
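The 57 GB figure above follows from a simple byte count; the sketch below (an illustration, not code from the paper) reproduces it and shows how the footprint grows with the number of precomputed hops $L$.

```python
# Back-of-the-envelope CPU memory estimate for precomputing-based methods:
# storing L propagated copies of an N x d float32 feature matrix costs ~L*N*d*4 bytes.
def feature_memory_gb(num_nodes: int, dim: int, num_hops: int = 1,
                      bytes_per_float: int = 4) -> float:
    return num_nodes * dim * bytes_per_float * num_hops / 1e9

# ogbn-papers100M: ~111 million nodes, 128-dimensional float features.
print(feature_memory_gb(111_000_000, 128))               # ~56.8 GB for the raw features
print(feature_memory_gb(111_000_000, 128, num_hops=4))   # ~227 GB when 4 hops are precomputed
```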
| 190 |
+
|
| 191 |
+
$\star$ Label Propagation: (Pros) As a traditional branch of graph learning algorithms, label propagation is a simple but effective add-on, nowadays used as a post-processing trick. Because of its model-agnostic nature, it can simply be attached to the end of any graph representation learning algorithm to boost the final prediction. (Cons) Label propagation introduces many additional sensitive hyperparameters, as introduced in Table 1, and is specifically designed for the node classification task.
|
| 192 |
+
|
| 193 |
+
# 5.2 Motivation and Related Works
|
| 194 |
+
|
| 195 |
+
To address the above constraints of sampling-based methods and precomputing-based methods, let us first recap the full-batch message passing in Equ. (1). We reformulate it into a more general form:
|
| 196 |
+
|
| 197 |
+
$$
|
| 198 |
+
\mathbf {X} ^ {(k)} = \Phi^ {(k - 1)} \Bigg (\mathbf {A} \Phi^ {(k - 2)} \big (\dots \mathbf {A} \Phi^ {(0)} (\mathbf {A X} ^ {(0)}) \big) \Bigg),
|
| 199 |
+
$$
|
| 200 |
+
|
| 201 |
+
where $\Phi^{(i)}$ denotes the feature mapping model for the $i$ -th layer. To make message passing scalable, following the rationale of decoupling, we propose a training scheme that differs from precomputing: instead of end-to-end training, we sequentially train the models $\Phi^{(i)}$ in a layer-wise manner. In this way, no precomputing is required, and the corresponding constraint on CPU memory occupation is essentially removed. To elaborate on this, we present the layer-wise training scheme:
|
| 202 |
+
|
| 203 |
+
$$
|
| 204 |
+
\underbrace{\mathbf{X}^{(l)} = \mathbf{A}\mathbf{X}^{(l-1)}}_{\text{message passing on CPUs}}, \quad \underbrace{\mathbf{Z}^{(l)} = \Phi^{(l)}(\mathbf{X}^{(l)})}_{\text{forward propagation}}, \quad \underbrace{\nabla \Phi^{(l)} = \nabla \mathcal{L}(\mathbf{Z}^{(l)}, \mathbf{Y})}_{\text{backward propagation}}. \tag{5}
|
| 205 |
+
$$
|
| 206 |
+
|
| 207 |
+
From layer 0 to $k$, we perform message passing once and then train $\Phi^{(l)}$ in batches for several epochs. Finally, one can simply use the output of the model $\Phi$ as the prediction. Moreover, from the perspective of ensembling, the models $\Phi$ can naturally be viewed as a set of weak learners trained on multiple views of the input $\mathbf{X}$. As a result, it is natural to use ensembling, such as majority voting, to boost the final predictions. In addition, based on our empirical results, this training scheme matches SOTA methods on relatively small datasets without exhaustive fine-tuning. To further boost the performance, we organically integrate SLE, which yields new SOTA performance on several representative datasets. We name this model EnGCN (Ensembling GCN).
|
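As a rough sketch of the scheme in Eq. (5) (our simplification: a hypothetical `make_model` factory stands in for the feature-mapping models $\Phi^{(l)}$, and the mini-batch loop and training-mask handling are omitted), each hop is propagated once on CPU and only a small model is fitted on the propagated features:

```python
def layerwise_train(A_hat, X, y, num_hops, make_model):
    """Sketch of Eq. (5): propagate once per hop, then fit a weak learner per hop.

    A_hat      : (N, N) normalized adjacency matrix (dense or scipy sparse)
    X          : (N, d) node features
    y          : (N,)   labels (in practice, only training nodes contribute to the loss)
    make_model : factory returning an object with .fit(X, y) / .predict_proba(X)
    """
    learners, X_l = [], X
    for l in range(num_hops + 1):
        if l > 0:
            X_l = A_hat @ X_l      # message passing on CPUs (left part of Eq. 5)
        model = make_model()
        model.fit(X_l, y)          # forward/backward propagation on the small model
        learners.append(model)     # keep each hop's learner for later ensembling
    return learners
```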
| 208 |
+
|
| 209 |
+
Related Works. Interestingly, the layer-wise training manner and majority voting are naturally consistent with boosting algorithms, where weak learners are trained sequentially with instance reweighting and the final prediction is made by majority voting. In the scope of graph representation learning, AdaGCN [56] first applies adaboosting [57, 58] to address the oversmoothing problem of deep GCNs. Though focusing on a different topic, AdaGCN has a similar training scheme to ours. Therefore, we implement a scalable version of it, which is included as a SOTA baseline in our experiments. In addition, AdaClusterGCN [59] proposes an adaboosting variant that ensembles weak learners trained on different clusters.
|
| 210 |
+
|
| 211 |
+
# 5.3 Methodology
|
| 212 |
+
|
| 213 |
+
Consider a large-scale graph $\mathcal{G} = (\mathbf{A},\mathbf{X},\mathbf{y})$, where $\mathbf{A}$ is the adjacency matrix, $\mathbf{X}$ the node features, and $\mathbf{y}$ the true labels. $\mathcal{T}_{train}$, $\mathcal{T}_{val}$, and $\mathcal{T}_{test}$ denote the training, validation, and test sets, respectively. Let $\mathbf{X}^{(l)}$ and $\mathbf{Y}^{(l)}$ denote the embeddings of node features and labels at the $l$ -th layer, respectively. We use $\widetilde{\mathbf{y}}^{(l)}$ and $\widetilde{\mathcal{T}}_{train}^{(l)}$ to denote the pseudo labels and the pseudo training set used for self-training at layer $l$.
|
| 214 |
+
|
| 215 |
+
Initialization. We initialize several important matrices and vectors:
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
\mathbf{X}^{(0)} = \mathbf{X}, \quad \mathbf{Y}_{i,:}^{(0)} = \begin{cases} \mathrm{one\_hot}(\mathbf{y}_{i}), & i \in \mathcal{T}_{train} \\ \mathbf{0}, & \text{otherwise} \end{cases}, \quad \widetilde{\mathcal{T}}_{train}^{(0)} = \mathcal{T}_{train}, \quad \tilde{\mathbf{y}}_{i}^{(0)} = \begin{cases} \mathbf{y}_{i}, & i \in \widetilde{\mathcal{T}}_{train} \\ \mathbf{0}, & \text{otherwise} \end{cases}
|
| 219 |
+
$$
|
| 220 |
+
|
| 221 |
+
Layer-wise Training. From layer 0 to layer $k$, we follow a layer-wise training scheme, where each training stage contains three phases: pre-processing, training, and post-processing. For layer $l$, the three phases are described as follows.
|
| 222 |
+
|
| 223 |
+
Pre-processing. For pre-processing, we precompute $\mathbf{X}^{(l)}$ and $\mathbf{Y}^{(l)}$ in CPUs as follows:
|
| 224 |
+
|
| 225 |
+
$$
|
| 226 |
+
\mathbf {X} ^ {(l)} = \hat {\mathbf {A}} \mathbf {X} ^ {(l - 1)}, \quad \mathbf {Y} ^ {(l)} = \hat {\mathbf {A}} \mathbf {Y} ^ {(l - 1)}, \tag {6}
|
| 227 |
+
$$
|
| 228 |
+
|
| 229 |
+
where $\hat{\mathbf{A}}$ is symmetrically normalized [1]. Note that pre-processing is skipped when $l = 0$ .
|
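A minimal sketch of the pre-processing step (ours, using scipy; the exact normalization choice, e.g. whether self-loops are added, may differ from the released implementation): symmetric normalization of $\mathbf{A}$ followed by one propagation of both the feature and the label matrices as in Eq. (6).

```python
import numpy as np
import scipy.sparse as sp

def sym_normalize(A):
    """Return D^{-1/2} (A + I) D^{-1/2}, the usual GCN normalization [1]."""
    A = A + sp.eye(A.shape[0], format="csr")           # add self-loops (assumption)
    deg = np.asarray(A.sum(axis=1)).ravel()
    d_inv_sqrt = sp.diags(1.0 / np.sqrt(np.maximum(deg, 1e-12)))
    return d_inv_sqrt @ A @ d_inv_sqrt

def preprocess(A_hat, X_prev, Y_prev):
    """One pre-processing step of Eq. (6), executed on CPUs."""
    return A_hat @ X_prev, A_hat @ Y_prev
```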
| 230 |
+
|
| 231 |
+
Training. We only train two simple models until convergence, which empirically takes dozens of epochs on real-world datasets. The forward propagation is:
|
| 232 |
+
|
| 233 |
+
$$
|
| 234 |
+
\mathrm{out}^{(l)} = \Omega\left(\mathbf{X}^{(l)}, \mathbf{Y}^{(l)}\right) = \Phi\left(\mathbf{X}^{(l)}\right) + \Psi\left(\mathbf{Y}^{(l)}\right), \tag{7}
|
| 235 |
+
$$
|
| 236 |
+
|
| 237 |
+
where $\Phi$ and $\Psi$ are two MLP models that are shared across all layers. Specifically, when $l = 0$ , the forward propagation reduces to $\mathrm{out}^{(0)} = \Phi (\mathbf{X}^{(0)})$ , where $\Psi$ is not involved; this is because the initialized $\mathbf{Y}^{(0)}$ contains many zero vectors and would cause overfitting. For backward propagation, we compute the training loss using the pseudo labels $\tilde{\mathbf{y}}^{(l)}$ instead of the true labels $\mathbf{y}$ .
|
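The forward pass of Eq. (7) only involves two MLPs; a PyTorch-style sketch (ours; hidden sizes and depths are illustrative, not the paper's exact configuration) could look as follows.

```python
import torch
import torch.nn as nn

class EnGCNBlock(nn.Module):
    """Sketch of Eq. (7): out = Phi(X^{(l)}) + Psi(Y^{(l)})."""
    def __init__(self, feat_dim: int, num_classes: int, hidden: int = 256):
        super().__init__()
        self.phi = nn.Sequential(nn.Linear(feat_dim, hidden), nn.ReLU(),
                                 nn.Linear(hidden, num_classes))
        self.psi = nn.Sequential(nn.Linear(num_classes, hidden), nn.ReLU(),
                                 nn.Linear(hidden, num_classes))

    def forward(self, x_l, y_l, use_label_model: bool = True):
        out = self.phi(x_l)
        if use_label_model:          # skipped at l = 0, as discussed above
            out = out + self.psi(y_l)
        return out
```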
| 238 |
+
|
| 239 |
+
Post-processing. After obtaining the trained models, we save their states as $\Omega^{(l)} = (\Phi^{(l)},\Psi^{(l)})$ for ensembling. Furthermore, self-training is used to enhance the training set. Following Sun et al. [29], the pseudo labels and the pseudo training set are updated as follows.
|
| 240 |
+
|
| 241 |
+
$$
|
| 242 |
+
\widetilde{\mathcal{T}}_{train}^{(l+1)} = \widetilde{\mathcal{T}}_{train}^{(l)} \cup \left\{ i \,\middle|\, \max_{c}\left(\tau\left(\mathrm{out}_{i}^{(l)}\right)\right) \geq \alpha \right\}, \quad \tilde{\mathbf{y}}_{i}^{(l+1)} = \begin{cases} \mathbf{y}_{i}, & i \in \mathcal{T}_{train} \\ c, & \text{else if } \max_{c}\left(\tau\left(\mathrm{out}_{i}^{(l)}\right)\right) \geq \alpha \end{cases}, \tag{8}
|
| 243 |
+
$$
|
| 244 |
+
|
| 245 |
+
where $\tau$ is the softmax function.
|
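A small sketch (ours) of the self-training update of Eq. (8): nodes whose softmax confidence exceeds $\alpha$ are added to the pseudo training set with their predicted class as pseudo label, while the true labels are always kept for the original training nodes.

```python
import numpy as np

def self_training_update(out_l, y_true, train_mask, pseudo_mask, pseudo_y, alpha=0.9):
    """Eq. (8): grow the pseudo training set with confidently predicted nodes."""
    prob = np.exp(out_l - out_l.max(axis=1, keepdims=True))
    prob /= prob.sum(axis=1, keepdims=True)                   # softmax tau(out)
    confident = prob.max(axis=1) >= alpha

    new_pseudo_mask = pseudo_mask | confident
    new_pseudo_y = pseudo_y.copy()
    new_pseudo_y[confident] = prob[confident].argmax(axis=1)  # pseudo label = argmax class
    new_pseudo_y[train_mask] = y_true[train_mask]             # true labels always win
    return new_pseudo_mask, new_pseudo_y
```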
| 246 |
+
|
| 247 |
+
Inference With Majority Voting. After $k$ layers, we have obtained a series of weak learners $\{\Omega^{(l)} \mid 0 \leq l \leq k\}$ . The final prediction of node $n$ is made by weighted majority voting [58]:
|
| 248 |
+
|
| 249 |
+
$$
|
| 250 |
+
\hat{y}_{n} = \operatorname{argmax}_{c} \sum_{l=0}^{k} \left( \mathbf{z}_{n}^{(l)} - \frac{1}{d} \sum_{i=1}^{d} \mathbf{z}_{n,i}^{(l)} \right), \tag{9}
|
| 251 |
+
$$
|
| 252 |
+
|
| 253 |
+
where $\mathbf{z}_n^{(l)} = \operatorname{log\_softmax}(\mathrm{out}_n^{(l)})$ .
|
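Eq. (9) centers each learner's log-softmax scores over the classes before summing them across layers; a direct numpy transcription (our sketch) reads:

```python
import numpy as np

def log_softmax(x):
    m = x.max(axis=1, keepdims=True)
    return x - m - np.log(np.exp(x - m).sum(axis=1, keepdims=True))

def ensemble_predict(outs):
    """Eq. (9): weighted majority voting over the layer-wise weak learners.

    outs: list of (N, C) raw outputs, one per weak learner Omega^{(l)}.
    """
    total = 0.0
    for out in outs:
        z = log_softmax(out)
        total = total + (z - z.mean(axis=1, keepdims=True))   # center over classes
    return total.argmax(axis=1)
```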
| 254 |
+
|
| 255 |
+
# 5.4 Empirical Analysis
|
| 256 |
+
|
| 257 |
+
Experiment Settings. Consistent with our effectiveness benchmark, we test our proposed EnGCN on Flickr, Reddit, and ogbn-products. A similar hyperparameter (HP) search was conducted to find its suitable HP setting. The search space is provided in Appendix A2.3. For the baselines, we directly use all benchmark results from section 3, where the SOTA performance has been achieved.
|
| 258 |
+
|
| 259 |
+
Main Experiment. As shown in Table 4, EnGCN outperforms the sampling-based and decoupling-based methods on multi-scale datasets. For Flickr and Reddit, EnGCN outperforms the baselines by a large margin. Remarkably, EnGCN has achieved new SOTA performance on ogbn-products, outperforming C&S by $2.88\%$ and the SOTA model (GIANT-XRT+SAGN+MCR+C&S) in the ogb leaderboard by $1.26\%$ . In addition to the comparison experiment, we also conduct a couple of ablation studies to provide more insights into EnGCN in Appendix A2.4.
|
| 260 |
+
|
| 261 |
+
Table 4: The comparison experiment results on Flickr, Reddit, and ogbn-products
|
| 262 |
+
|
| 263 |
+
<table><tr><td>Category</td><td>Baselines</td><td>Flickr</td><td>Reddit</td><td>ogbn-products</td></tr><tr><td rowspan="5">Sampling-based</td><td>GraphSAGE [3]</td><td>53.63 ± 0.13%</td><td>96.50 ± 0.03%</td><td>80.61 ± 0.16%</td></tr><tr><td>FastGCN [25]</td><td>50.51 ± 0.13%</td><td>79.50 ± 1.22%</td><td>73.46 ± 0.20%</td></tr><tr><td>LADIES [26]</td><td>50.51 ± 0.13%</td><td>86.96 ± 0.37%</td><td>75.31 ± 0.56%</td></tr><tr><td>ClusterGCN [23]</td><td>51.20 ± 0.13%</td><td>95.68 ± 0.03%</td><td>78.62 ± 0.61%</td></tr><tr><td>GraphSAINT [24]</td><td>51.81 ± 0.17%</td><td>95.62 ± 0.05%</td><td>75.36 ± 0.34%</td></tr><tr><td rowspan="5">Decoupling-based</td><td>SGC [27]</td><td>50.35 ± 0.05%</td><td>93.51 ± 0.04%</td><td>67.48 ± 0.11%</td></tr><tr><td>SIGN [28]</td><td>51.60 ± 0.11%</td><td>95.95 ± 0.02%</td><td>76.85 ± 0.56%</td></tr><tr><td>SAGN [29]</td><td>50.07 ± 0.11%</td><td>96.48 ± 0.03%</td><td>81.21 ± 0.07%</td></tr><tr><td>GAMLP [30]</td><td>52.58 ± 0.12%</td><td>96.73 ± 0.03%</td><td>83.76 ± 0.19%</td></tr><tr><td>C&S [38]</td><td>51.24 ± 0.17%</td><td>95.33 ± 0.08%</td><td>85.11 ± 0.07%</td></tr><tr><td rowspan="3">Other SOTA Methods</td><td>AdaGCN [56]</td><td>52.97 ± 0.01%</td><td>96.05 ± 0.00%</td><td>76.41 ± 0.00%</td></tr><tr><td>SAGN+SLE [29]*</td><td>54.60 ± 0.40%</td><td>97.10 ± 0.00%</td><td>84.28 ± 0.14%</td></tr><tr><td>GIANT-XRT+SAGN+MCR+C&S [60]*</td><td>-</td><td>-</td><td>86.73 ± 0.08%</td></tr><tr><td>Ours</td><td>EnGCN</td><td>56.43 ± 0.21%</td><td>97.14 ± 0.03%</td><td>87.99 ± 0.04%</td></tr></table>
|
| 264 |
+
|
| 265 |
+
*: the results are from the original papers
|
| 266 |
+
|
| 267 |
+
The Training Efficiency and Convergence Landscape of EnGCN. For EnGCN, since all that needs to be trained are two simple shallow MLPs (Section 5.3), the GPU throughput and memory consumption
|
| 268 |
+
|
| 269 |
+
are expected to be highly efficient. The remaining concern is solely the convergence of EnGCN. Due to the nature of layer-wise training, the convergence of EnGCN is more complicated than that of end-to-end training methods. In Figure 3, we show the convergence landscape of EnGCN and provide several interesting observations as follows.
|
| 270 |
+
|
| 271 |
+
(1) As shown in Figure 3, the training and validation accuracy generally increase layer by layer until convergence. Noticeably, even when the training accuracy occasionally drops, the validation accuracy remains relatively stable. (2) At the beginning of each layer, the accuracy changes rapidly, indicating a remarkable distribution difference between the various hops. (3) Different datasets are sensitive to different hops. For example, the 2nd hop is crucial for boosting the training and validation accuracy on Flickr, while for Reddit and ogbn-products, 1-hop neighbors are more important.
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
|
| 275 |
+

|
| 276 |
+
|
| 277 |
+

|
| 278 |
+
|
| 279 |
+

|
| 280 |
+
Figure 3: The convergence landscape of EnGCN. All models are trained with 4 layers of features. In each layer-wise phase, the model is trained for 70 epochs.
|
| 281 |
+
|
| 282 |
+

|
| 283 |
+
|
| 284 |
+

|
| 285 |
+
|
| 286 |
+
The CPU memory consumption of EnGCN. To confirm the low CPU memory consumption of EnGCN, we conduct a comparison experiment and illustrate the results in Figure 4. The x-axis denotes the number of layers, while the y-axis records the allocated memory as reported by "aten::empty" in PyTorch. As shown in Figure 4, the precomputing-based methods, SIGN and SAGN, suffer from expensive CPU memory consumption as the model depth increases. For sampling-based methods, since there is no need to pre-store a large number of feature matrices, the memory consumption grows much more smoothly. For EnGCN, as no precomputing is
|
| 287 |
+
|
| 288 |
+

|
| 289 |
+
Figure 4: The allocated CPU memory of EnGCN and selected baselines on Flickr.
|
| 290 |
+
|
| 291 |
+
required, the CPU memory consumption is considerably reduced in comparison with SIGN and SAGN, which validates the CPU memory efficiency of EnGCN.
|
| 292 |
+
|
| 293 |
+
# 6 Conclusion
|
| 294 |
+
|
| 295 |
+
The scalability issue of graph convolutional networks has been a notoriously challenging research problem. In this work, we establish a fair and consistent benchmark for large-scale graph training w.r.t. effectiveness and efficiency. We provide a unified formulation for dozens of works and further assess them on the basis of accuracy, memory usage, throughput, and convergence. Furthermore, provided with the comprehensive benchmark results, we rethink the scalability issue of GCNs from the perspective of ensembling and present an ensembling-based training scheme (EnGCN) that only needs to train a couple of simple MLPs to achieve new SOTA performance on multiple large-scale datasets. We hope our study on benchmarking and rethinking will help lay a solid, practical, and systematic foundation for the scalable GCN community and provide researchers with broader and deeper insights into large-scale graph training.
|
| 296 |
+
|
| 297 |
+
# References
|
| 298 |
+
|
| 299 |
+
[1] Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016.
|
| 300 |
+
[2] Petar Velickovic, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. arXiv, 1(2), 2017.
|
| 301 |
+
[3] Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In NeurIPS, pages 1024-1034, 2017.
|
| 302 |
+
[4] Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. How powerful are graph neural networks? arXiv preprint arXiv:1810.00826, 2018.
|
| 303 |
+
[5] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. LightGCN: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR conference on research and development in Information Retrieval, pages 639-648, 2020.
|
| 304 |
+
[6] Rex Ying, Ruining He, Kaifeng Chen, Pong Eksombatchai, William L Hamilton, and Jure Leskovec. Graph convolutional neural networks for web-scale recommender systems. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 974–983, 2018.
|
| 305 |
+
[7] Wenqing Zheng, Edward W Huang, Nikhil Rao, Sumeet Katariya, Zhangyang Wang, and Karthik Subbian. Cold brew: Distilling graph node representations with incomplete or missing neighborhoods. arXiv preprint arXiv:2111.04840, 2021.
|
| 306 |
+
[8] Lei Tang and Huan Liu. Relational learning via latent social dimensions. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 817-826, 2009.
|
| 307 |
+
[9] Hongyang Gao, Zhengyang Wang, and Shuiwang Ji. Large-scale learnable graph convolutional networks. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1416-1424, 2018.
|
| 308 |
+
[10] Xiao Huang, Qingquan Song, Yuening Li, and Xia Hu. Graph recurrent networks with attributed random walks. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 732-740, 2019.
|
| 309 |
+
[11] Weihua Hu, Matthias Fey, Marinka Zitnik, Yuxiao Dong, Hongyu Ren, Bowen Liu, Michele Catasta, and Jure Leskovec. Open graph benchmark: Datasets for machine learning on graphs. arXiv preprint arXiv:2005.00687, 2020.
|
| 310 |
+
[12] Marinka Zitnik and Jure Leskovec. Predicting multicellular function through multi-layer tissue networks. Bioinformatics, 33(14):i190-i198, 2017.
|
| 311 |
+
[13] Weihua Hu, Bowen Liu, Joseph Gomes, Marinka Zitnik, Percy Liang, Vijay Pande, and Jure Leskovec. Strategies for pre-training graph neural networks. arXiv preprint arXiv:1905.12265, 2019.
|
| 312 |
+
[14] Yuning You, Tianlong Chen, Yongduo Sui, Ting Chen, Zhangyang Wang, and Yang Shen. Graph contrastive learning with augmentations. Advances in Neural Information Processing Systems, 33, 2020.
|
| 313 |
+
[15] Nikil Wale, Ian A Watson, and George Karypis. Comparison of descriptor spaces for chemical compound retrieval and classification. Knowledge and Information Systems, 14(3):347-375, 2008.
|
| 314 |
+
[16] Guohao Li, Matthias Muller, Ali Thabet, and Bernard Ghanem. Deep GCNs: Can GCNs go as deep as cnns? In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9267-9276, 2019.
|
| 315 |
+
|
| 316 |
+
[17] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38(5):1-12, 2019.
|
| 317 |
+
[18] Guohao Li, Chenxin Xiong, Ali Thabet, and Bernard Ghanem. DeeperGCN: All you need to train deeper GCNs. arXiv preprint arXiv:2006.07739, 2020.
|
| 318 |
+
[19] Johannes Klicpera, Aleksandar Bojchevski, and Stephan Gunnemann. Predict then propagate: Graph neural networks meet personalized pagerank. arXiv preprint arXiv:1810.05997, 2018.
|
| 319 |
+
[20] Keyulu Xu, Chengtao Li, Yonglong Tian, Tomohiro Sonobe, Ken-ichi Kawarabayashi, and Stefanie Jegelka. Representation learning on graphs with jumping knowledge networks. In International Conference on Machine Learning, pages 5453–5462. PMLR, 2018.
|
| 320 |
+
[21] Hongyang Gao and Shuiwang Ji. Graph u-nets. In international conference on machine learning, pages 2083-2092. PMLR, 2019.
|
| 321 |
+
[22] Kaixiong Zhou, Qingquan Song, Xiao Huang, Daochen Zha, Na Zou, and Xia Hu. Multi-channel graph neural networks. arXiv preprint arXiv:1912.08306, 2019.
|
| 322 |
+
[23] Wei-Lin Chiang, Xuanqing Liu, Si Si, Yang Li, Samy Bengio, and Cho-Jui Hsieh. Cluster-gcn: An efficient algorithm for training deep and large graph convolutional networks. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 257-266, 2019.
|
| 323 |
+
[24] Hanqing Zeng, Hongkuan Zhou, Ajitesh Srivastava, Rajgopal Kannan, and Viktor Prasanna. Graphsaint: Graph sampling based inductive learning method. arXiv preprint arXiv:1907.04931, 2019.
|
| 324 |
+
[25] Jie Chen, Tengfei Ma, and Cao Xiao. Fastgcn: fast learning with graph convolutional networks via importance sampling. arXiv preprint arXiv:1801.10247, 2018.
|
| 325 |
+
[26] Difan Zou, Ziniu Hu, Yewen Wang, Song Jiang, Yizhou Sun, and Quanquan Gu. Layer-dependent importance sampling for training deep and large graph convolutional networks. Advances in neural information processing systems, 32, 2019.
|
| 326 |
+
[27] Felix Wu, Amauri Souza, Tianyi Zhang, Christopher Fifty, Tao Yu, and Kilian Weinberger. Simplifying graph convolutional networks. In International conference on machine learning, pages 6861-6871. PMLR, 2019.
|
| 327 |
+
[28] Fabrizio Frasca, Emanuele Rossi, Davide Eynard, Ben Chamberlain, Michael Bronstein, and Federico Monti. Sign: Scalable inception graph neural networks. arXiv preprint arXiv:2004.11198, 2020.
|
| 328 |
+
[29] Chuxiong Sun and Guoshi Wu. Scalable and adaptive graph neural networks with self-label-enhanced training. arXiv preprint arXiv:2104.09376, 2021.
|
| 329 |
+
[30] Wentao Zhang, Ziqi Yin, Zeang Sheng, Wen Ouyang, Xiaosen Li, Yangyu Tao, Zhi Yang, and Bin Cui. Graph attention multi-layer perceptron. arXiv preprint arXiv:2108.10097, 2021.
|
| 330 |
+
[31] Aleksandar Bojchevski, Johannes Klicpera, Bryan Perozzi, Amol Kapoor, Martin Blais, Benedek Rózemberczki, Michal Lukasik, and Stephan Gunnemann. Scaling graph neural networks with approximate pagerank. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 2464–2473, 2020.
|
| 331 |
+
[32] Vasimuddin Md, Sanchit Misra, Guixiang Ma, Ramanarayan Mohanty, Evangelos Georganas, Alexander Heinecke, Dhiraj Kalamkar, Nesreen K Ahmed, and Sasikanth Avancha. Distgnn: Scalable distributed training for large-scale graph neural networks. arXiv preprint arXiv:2104.06700, 2021.
|
| 332 |
+
[33] Zirui Liu, Kaixiong Zhou, Fan Yang, Li Li, Rui Chen, and Xia Hu. Exact: Scalable graph neural networks training via extreme activation compression. In International Conference on Learning Representations, 2021.
|
| 333 |
+
|
| 334 |
+
[34] Jianfei Chen, Jun Zhu, and Le Song. Stochastic training of graph convolutional networks with variance reduction. arXiv preprint arXiv:1710.10568, 2017.
|
| 335 |
+
[35] Weilin Cong, Rana Forsati, Mahmut Kandemir, and Mehrdad Mahdavi. Minimal variance sampling with provable guarantees for fast training of graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1393–1403, 2020.
|
| 336 |
+
[36] Wenbing Huang, Tong Zhang, Yu Rong, and Junzhou Huang. Adaptive sampling towards fast graph representation learning. Advances in neural information processing systems, 31, 2018.
|
| 337 |
+
[37] Meng Liu and Shuiwang Ji. Neighbor2seq: Deep learning on massive graphs by transforming neighbors to sequences. arXiv preprint arXiv:2202.03341, 2022.
|
| 338 |
+
[38] Qian Huang, Horace He, Abhay Singh, Ser-Nam Lim, and Austin R Benson. Combining label propagation and simple models out-performs graph neural networks. arXiv preprint arXiv:2010.13993, 2020.
|
| 339 |
+
[39] George Karypis and Vipin Kumar. A fast and high quality multilevel scheme for partitioning irregular graphs. SIAM Journal on scientific Computing, 20(1):359-392, 1998.
|
| 340 |
+
[40] Xiaojin Zhu. Semi-supervised learning with graphs. Carnegie Mellon University, 2005.
|
| 341 |
+
[41] Fei Wang and Changshui Zhang. Label propagation through linear neighborhoods. IEEE Transactions on Knowledge and Data Engineering, 20(1):55-67, 2007.
|
| 342 |
+
[42] Masayuki Karasuyama and Hiroshi Mamitsuka. Manifold-based similarity adaptation for label propagation. Advances in neural information processing systems, 26:1547-1555, 2013.
|
| 343 |
+
[43] Chen Gong, Dacheng Tao, Wei Liu, Liu Liu, and Jie Yang. Label propagation via teaching-to-learn and learning-to-teach. IEEE transactions on neural networks and learning systems, 28(6):1452-1465, 2016.
|
| 344 |
+
[44] Yanbin Liu, Juho Lee, Minseop Park, Saehoon Kim, Eunho Yang, Sung Ju Hwang, and Yi Yang. Learning to propagate labels: Transductive propagation network for few-shot learning. arXiv preprint arXiv:1805.10002, 2018.
|
| 345 |
+
[45] Hongwei Wang and Jure Leskovec. Unifying graph convolutional neural networks and label propagation. arXiv preprint arXiv:2002.06755, 2020.
|
| 346 |
+
[46] Eli Chien, Wei-Cheng Chang, Cho-Jui Hsieh, Hsiang-Fu Yu, Jiong Zhang, Olgica Milenkovic, and Inderjit S Dhillon. Node feature extraction by self-supervised multi-scale neighborhood prediction. arXiv preprint arXiv:2111.00064, 2021.
|
| 347 |
+
[47] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. Advances in neural information processing systems, 26, 2013.
|
| 348 |
+
[48] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
|
| 349 |
+
[49] Matthias Fey, Jan E Lenssen, Frank Weichert, and Jure Leskovec. GNNAutoScale: Scalable and expressive graph neural networks via historical embeddings. In International Conference on Machine Learning, pages 3294-3304. PMLR, 2021.
|
| 350 |
+
[50] Mucong Ding, Kezhi Kong, Jingling Li, Chen Zhu, John Dickerson, Furong Huang, and Tom Goldstein. Vq-gnn: A universal framework to scale up graph neural networks using vector quantization. Advances in Neural Information Processing Systems, 34:6733-6746, 2021.
|
| 351 |
+
[51] Qimai Li, Zhichao Han, and Xiao-Ming Wu. Deeper insights into graph convolutional networks for semi-supervised learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018.
|
| 352 |
+
|
| 353 |
+
[52] Tianlong Chen, Kaixiong Zhou, Keyu Duan, Wenqing Zheng, Peihao Wang, Xia Hu, and Zhangyang Wang. Bag of tricks for training deeper graph neural networks: A comprehensive benchmark study. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022.
|
| 354 |
+
[53] Kenta Oono and Taiji Suzuki. Graph neural networks exponentially lose expressive power for node classification. In International Conference on Learning Representations, 2020.
|
| 355 |
+
[54] Kaixiong Zhou, Xiao Huang, Yuening Li, Daochen Zha, Rui Chen, and Xia Hu. Towards deeper graph neural networks with differentiable group normalization. Advances in Neural Information Processing Systems, 33, 2020.
|
| 356 |
+
[55] Kaixiong Zhou, Xiao Huang, Daochen Zha, Rui Chen, Li Li, Soo-Hyun Choi, and Xia Hu. Dirichlet energy constrained learning for deep graph neural networks. Advances in Neural Information Processing Systems, 34:21834-21846, 2021.
|
| 357 |
+
[56] Ke Sun, Zhanxing Zhu, and Zhouchen Lin. Adagcn: Adaboosting graph convolutional networks into deep models. arXiv preprint arXiv:1908.05081, 2019.
|
| 358 |
+
[57] Yoav Freund, Robert Schapire, and Naoki Abe. A short introduction to boosting. Journal-Japanese Society For Artificial Intelligence, 14(771-780):1612, 1999.
|
| 359 |
+
[58] Trevor Hastie, Saharon Rosset, Ji Zhu, and Hui Zou. Multi-class adaboost. Statistics and its Interface, 2(3):349-360, 2009.
|
| 360 |
+
[59] Li Zheng, Jun Gao, Zhao Li, and Ji Zhang. Adaboosting clusters on graph neural networks. In 2021 IEEE International Conference on Data Mining (ICDM), pages 1523-1528. IEEE, 2021.
|
| 361 |
+
[60] Chenhui Zhang, Yufei He, Yukuo Cen, Zhenyu Hou, and Jie Tang. Improving the training of graph neural networks with consistency regularization. arXiv preprint arXiv:2112.04319, 2021.
|
| 362 |
+
[61] Jure Leskovec and Christos Faloutsos. Sampling from large graphs. In Proceedings of the 12th ACM SIGKDD international conference on Knowledge discovery and data mining, pages 631-636, 2006.
|
| 363 |
+
[62] Matthias Fey and Jan Eric Lenssen. Fast graph representation learning with pytorch geometric. arXiv preprint arXiv:1903.02428, 2019.
|
| 364 |
+
[63] Jianfei Chen, Lianmin Zheng, Zhewei Yao, Dequan Wang, Ion Stoica, Michael W Mahoney, and Joseph E Gonzalez. Actnn: Reducing training memory footprint via 2-bit activation compressed training. In International Conference on Machine Learning, 2021.
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2e748cf61ed962e31729f7ac676cb4c7d070b68b1e6259e7140724f11b0a3199
|
| 3 |
+
size 515215
|
acomprehensivestudyonlargescalegraphtrainingbenchmarkingandrethinking/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c0409e845aa93e3df748fb0a7da1862228926fadf8144c8433b46bf21fb00bfb
|
| 3 |
+
size 497016
|
aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:de072bb7bc3bd54ca9f44fa3e8c61b5f5cbe9b9179972e3e6cc8cb7ef77ed739
|
| 3 |
+
size 88405
|
aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3601946a478ee9f57024574dc90bd5b658882a750fa95b5d6155b2a46b4fc017
|
| 3 |
+
size 106977
|
aconditionalrandomizationtestforsparselogisticregressioninhighdimension/a0a09a92-db33-4a74-8880-36b12ccc9eff_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79606faffcde3ee980373349351e8a7f9dcfe855df6f42fa809575a0df2df8aa
|
| 3 |
+
size 2369193
|
aconditionalrandomizationtestforsparselogisticregressioninhighdimension/full.md
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A Conditional Randomization Test for Sparse Logistic Regression in High-Dimension
|
| 2 |
+
|
| 3 |
+
Binh T. Nguyen
|
| 4 |
+
|
| 5 |
+
LTCI, Telecom Paris, IP Paris
|
| 6 |
+
|
| 7 |
+
tuanbinhs@gmail.com
|
| 8 |
+
|
| 9 |
+
Bertrand Thirion
|
| 10 |
+
|
| 11 |
+
Université Paris-Saclay, Inria, CEA, Palaiseau 91120, France
|
| 12 |
+
|
| 13 |
+
bertrand.thirion@inria.fr
|
| 14 |
+
|
| 15 |
+
# Sylvain Arlot
|
| 16 |
+
|
| 17 |
+
Université Paris-Saclay, CNRS, Inria, Laboratoire de mathématiques d'Orsay, 91405, Orsay, France sylvain.arlot@universite-paris-saclay.fr
|
| 18 |
+
|
| 19 |
+
# Abstract
|
| 20 |
+
|
| 21 |
+
Identifying the relevant variables for a classification model with correct confidence levels is a central but difficult task in high-dimension. Despite the core role of sparse logistic regression in statistics and machine learning, it still lacks a good solution for accurate inference in the regime where the number of features $p$ is as large as or larger than the number of samples $n$ . Here we tackle this problem by improving the Conditional Randomization Test (CRT). The original CRT algorithm shows promise as a way to output p-values while making few assumptions on the distribution of the test statistics. As it comes with a prohibitive computational cost even in mildly high-dimensional problems, faster solutions based on distillation have been proposed. Yet, they rely on unrealistic hypotheses and result in low-power solutions. To improve this, we propose CRT-logit, an algorithm that combines a variable-distillation step and a decorrelation step that takes into account the geometry of the $\ell_1$ -penalized logistic regression problem. We provide a theoretical analysis of this procedure, and demonstrate its effectiveness on simulations, along with experiments on large-scale brain-imaging and genomics datasets.
|
| 22 |
+
|
| 23 |
+
# 1 Introduction
|
| 24 |
+
|
| 25 |
+
Logistic regression is one of the most popular tools in modern applications of statistics and machine learning, partly due to its relative algorithmic simplicity. The method belongs to the class of generalized linear models that handle discrete outcomes, i.e. classification problems. Here, we focus on the binary classification problem, where one observation of the responses $y \in \{0,1\}$ and the data vectors $\mathbf{x} \in \mathbb{R}^p$ follows the relationship:
|
| 26 |
+
|
| 27 |
+
$$
|
| 28 |
+
\mathbb {P} (y = 1 \mid \mathbf {x}) = g \left(\mathbf {x} ^ {T} \boldsymbol {\beta} ^ {0}\right) = \frac {1}{1 + \exp \left(- \mathbf {x} ^ {T} \boldsymbol {\beta} ^ {0}\right)}, \tag {1}
|
| 29 |
+
$$
|
| 30 |
+
|
| 31 |
+
where $g(x) = 1 / (1 + \exp (-x))$ is the sigmoid function, and $\beta^0$ the vector of true regression coefficients. In the classical setting, in which the number of samples $n$ is greater than the number of features $p$ , an estimate $\hat{\beta}$ of the true signals $\beta^0$ can be obtained using maximum likelihood estimation (MLE). The asymptotic behaviour and derivation of the test statistic, confidence intervals and p-values of the MLE have been well studied, e.g. in [13]. The availability of p-values for the test statistics makes it possible to rely on multiple hypothesis testing, where one wants to test which variables have a non-zero effect on the outcome, conditioned on the remaining variables. Unfortunately, this line of analysis cannot be applied to the high-dimensional regime, where $p$ is larger than $n$ , as argued in [25, 32, 34]. These works show that in the regime $\lim_{n,p\to \infty}n / p = \kappa$ , the MLE estimator
|
| 32 |
+
|
| 33 |
+
exists only when $\kappa > 2$ . However, we note that this type of analysis is done without the addition of $\ell_1$ -regularization to the likelihood function, i.e. without using a penalized estimator to enforce sparsity.
|
| 34 |
+
|
| 35 |
+
Motivation Our focus in this paper is to perform inference with statistical guarantees for high-dimensional sparse logistic regression, where $p$ is as large as or much larger than $n$ . This setting is typical in modern applications of pattern recognition, e.g. in brain-imaging or genomics [3], where $p$ can be as large as hundreds of thousands (compressible to thousands) while $n$ stays at most in the few thousands.
|
| 36 |
+
|
| 37 |
+
The family of methods we consider is the Conditional Randomization Test (CRT) [10]. CRT relies on generating multiple noisy copies of original variables to output empirical p-values in high-dimensional inference problems. However, prohibitive computational cost makes CRT impractical, as discussed at length in [10, 26, 7, 19]. There have been several lines of research attempting to fix this problem, most notably the distilled Conditional Randomization Test (dCRT) [19]. This work introduced a distillation step as a replacement for the randomized sampling step to compute the importance statistics (see Section 2 for more details). It provides a way to output p-values for multiple types of regression and classification problems, assuming convergence to Gaussian distribution of the test statistic in large-sample regime. Yet, as shown in the left panel of Figure 1, the originally proposed dCRT test-statistic for logistic regression
|
| 38 |
+
|
| 39 |
+
does not behave as well as intended. In particular, its null distribution deviates markedly from standard normal in high-dimension whenever $n / p \leq 1$ .
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
Figure 1: QQ-Plot for 1000 samples of test-statistic of a null index for logistic regression, with simulated data, $n = 200$ , $p = 400$ . Left: Statistics obtained from running Distilled-CRT, and Right: from our proposed algorithm. The empirical distribution of the dCRT null-statistic strays far from theoretical distribution, which is standard normal, while empirical distribution of CRT-logit's null test score is much closer.
|
| 43 |
+
|
| 44 |
+
Contribution We propose a correction for the dCRT, inspired by the decorrelation method presented in [21]. The decorrelation step makes the null-distribution of the test statistics much closer to standard normal, as shown on the right panel of Figure 1, and thus increases the statistical power of the method. We provide asymptotic analysis of this method, which shows that CRT-logit produces standard normal test-statistics in the large-sample regime. In addition, we validate the high performance of CRT-logit on large-scale brain-imaging and genetics datasets, thus showing its usefulness in practical applications.
|
| 45 |
+
|
| 46 |
+
Related works The closest cousin of the Conditional Randomization Test is Knockoff Filter [4, 10], a recent breakthrough in the False Discovery Rate (FDR) control literature. It relies on the creation of additional noisy features, called knockoffs, to calculate variable-importance statistics. Another extension of vanilla CRT is the Holdout Randomization Test (HRT) [26]. While still requiring multiple samplings of noisy variables, HRT solves the computational issue of original CRT by doing heavy model fitting only once on one part of the dataset, and test statistics calculation on the other part, without refitting the model. However, this method relies on sample-splitting, and hence inherently suffers from a loss of statistical power. A parallel line of work has introduced the Conditional Permutation Test (CPT) [7], a non-parametric alternative to CRT that relies on a random shuffling mechanism applied to original variables, instead of multiple sampling of new variables. This potentially makes CPT more robust to model mis-specification. [32] recently proposed a method called SLOE, which adapts the analysis of [34], but in a regime different from what we are considering, where $\lim_{n,p\to \infty}n / p\rightarrow \kappa \in (1,2)$ , and more importantly without sparsity-inducing penalty. On a separate note, we notice the similarity of dCRT [19] with debiased Lasso [16, 28, 33]. This line of work proposed a debiasing formula for the estimator, which makes the asymptotic distribution of $(\hat{\beta}^{\mathrm{LASSO}} - \beta^{0})$ standard normal, so that one can compute the test statistic and p-value associated with each variable.
|
| 47 |
+
|
| 48 |
+
# 2 Background
|
| 49 |
+
|
| 50 |
+
Notation We denote matrices, vectors, scalars and sets by bold uppercase, bold lowercase, script lowercase, and calligraphic letters, respectively, e.g. $\mathbf{X}$ , $\mathbf{x}$ , $x$ , and $\mathcal{X}$ . The $i$ -th row of a matrix $\mathbf{X}$ will be denoted $\mathbf{X}_{i,*}$ , the $j$ -th column $\mathbf{X}_{*,j}$ and the $(i,j)$ -th element $\mathbf{X}_{i,j}$ . For any natural number $p$ , we denote the set $[p] \stackrel{\mathrm{def.}}{=} \{1, \ldots, p\}$ . For each $\mathbf{x} \in \mathbb{R}^p$ and $j \in [p]$ , we denote $\mathbf{x}_{-j} \stackrel{\mathrm{def.}}{=} \{x_1, x_2, \ldots, x_{j-1}, x_{j+1}, \ldots, x_p\}$ as $p-1$ dimensional observation after removing the $j$ -th variable. Correspondingly, $\mathbf{X}_{-j}$ is the data matrix $\mathbf{X} \in \mathbb{R}^{n \times p}$ with column $\mathbf{X}_{*,j}$ removed. The cumulative distribution function (CDF) of the standard Gaussian distribution will be denoted $\Phi(\cdot)$ . The indicator function of a random event $\mathcal{A}$ will be denoted $\mathbf{1}_{\mathcal{A}}$ . For any two positive sequences $x_n$ and $y_n$ , we write $x_n \asymp y_n$ if $c y_n \leq x_n \leq C y_n$ for all $n$ , for some positive constants $c$ and $C$ . For a vector $\mathbf{x}$ , $\| \mathbf{x} \|_p$ denotes its $\ell_p$ norm. For a function $f: \mathbb{R}^p \to \mathbb{R}$ , $\nabla_j f$ denotes its gradient w.r.t. the $j$ -th variable, for $j \in [p]$ .
|
| 51 |
+
|
| 52 |
+
Problem setting We consider exclusively binary classification, where the response vector is denoted $\mathbf{y} \in \{0,1\}^n$ and the data matrix $\mathbf{X} \in \mathbb{R}^{n \times p}$ consists of $n$ $p$ -dimensional samples. Throughout the paper, we assume the data $\{\mathbf{X}_{i,*}\}_{i=1}^n$ are i.i.d. and follow a distribution with zero mean and population covariance matrix $\boldsymbol{\Sigma}$ . Moreover, we assume that $\mathbf{X}_{i,*}$ and $\mathbf{y}_i$ follow the logistic relationship in Eq. (1). We denote the support set $S \stackrel{\mathrm{def.}}{=} \{j \in [p] : \beta_j^0 \neq 0\}$ and assume that it is sparse, i.e. $\operatorname{card}(S) = s^* \ll p$ , where card denotes the cardinality of a set. Furthermore, $\hat{S} \stackrel{\mathrm{def.}}{=} \{j \in [p] : \hat{\beta}_j \neq 0\}$ indicates an estimation of $S$ , where $\hat{\beta}_j$ is an estimate of the true signal $\beta_j^0$ . We try to obtain it through an $\ell_1$ -penalized logistic estimator:
|
| 53 |
+
|
| 54 |
+
$$
|
| 55 |
+
\hat{\beta}_{\lambda} = \underset{\boldsymbol{\beta} \in \mathbb{R}^{p}}{\operatorname{argmin}} \; \ell(\boldsymbol{\beta}) + \lambda \|\boldsymbol{\beta}\|_{1}, \quad \text{with } \ell(\boldsymbol{\beta}) = -\frac{1}{n} \sum_{i=1}^{n} \left\{ \left(\mathbf{X}_{i,*}\boldsymbol{\beta}\right) y_{i} - \log\left[1 + \exp\left(\mathbf{X}_{i,*}\boldsymbol{\beta}\right)\right] \right\}. \tag{2}
|
| 56 |
+
$$
|
| 57 |
+
|
| 58 |
+
We denote $\mathbf{I} \stackrel{\mathrm{def.}}{=} \mathbb{E}_{\beta^0}[\nabla^2 \ell(\beta^0)]$ the Fisher information matrix, and $\mathbf{I}_{j| - j}$ the partial Fisher information, defined by $\mathbf{I}_{j| - j} \stackrel{\mathrm{def.}}{=} \mathbb{E}[\nabla_{jj}^2 \ell(\beta^0) - [\nabla_{j, - j}^2 \ell(\beta^0)]^\top [\nabla_{-j, - j}^2 \ell(\beta^0)]^{-1} \nabla_{-j,j}^2 \ell(\beta^0)] = \mathbf{I}_{jj} - \mathbf{I}_{j, - j} \mathbf{I}_{-j, - j}^{-1} \mathbf{I}_{-j,j}$ , where $\mathbf{I}_{j, - j}$ is the row-vector made with the $j$ th-row and the columns corresponding to $\beta_{-j}$ , $\mathbf{I}_{-j, - j}$ the sub-matrix of $\mathbf{I}$ made with the rows and columns corresponding to $\beta_{-j}$ . This quantity, defined following [13, pp. 323], plays an important role in our proposed method, detailed in Section 3.
|
| 59 |
+
|
| 60 |
+
Statistical control with False Discovery Rate To quantify statistical errors, we consider the False Discovery Rate, introduced in [5]. Given an estimate of the support $\hat{S}$ , the false discovery proportion (FDP) is the ratio of the number of selected features that do not belong to the true support $S$ , divided by the total number of selected features. The False Discovery Rate is the expectation of the FDP:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\operatorname{FDP}(\hat{\mathcal{S}}) = \frac{\operatorname{card}(\{j : j \in \hat{\mathcal{S}}, j \notin S\})}{\operatorname{card}(\hat{\mathcal{S}}) \lor 1} \qquad \text{and} \qquad \operatorname{FDR}(\hat{\mathcal{S}}) = \mathbb{E}[\operatorname{FDP}(\hat{\mathcal{S}})].
|
| 64 |
+
$$
|
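As a concrete illustration (a trivial helper, not code from the paper), the FDP of a selected set can be computed directly from this definition:

```python
def false_discovery_proportion(selected, true_support):
    """FDP = #(selected but not in the true support) / max(#selected, 1)."""
    selected, true_support = set(selected), set(true_support)
    return len(selected - true_support) / max(len(selected), 1)

# Example: 2 false positives out of 5 selected variables -> FDP = 0.4
print(false_discovery_proportion({1, 2, 3, 10, 11}, {1, 2, 3, 4, 5}))
```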
| 65 |
+
|
| 66 |
+
Conditional Randomization Test (CRT) and Distillation CRT (dCRT) The concept of Conditional Randomization Test was originally proposed in the model-X knockoff paper [10] as a way to output valid empirical p-values using knockoff variables. The principle of the knockoff filter is first to sample noisy copies $\tilde{\mathbf{X}}_{*,j}$ of variable $\mathbf{X}_{*,j}$ , given a known sampling mechanism $P_{j| - j}$ . One advantage of the knockoff filter is that no specific assumption is placed on the distribution of the inferred test statistic. However, this means that, in general, there is no mechanism to derive p-values from the knockoff statistic. This motivates the introduction of CRT, which requires running high-dimensional inference for each variable $j$ $B$ times. However, the computation cost of CRT is prohibitive when $p$ grows large: assuming that we use the Lasso program with coordinate descent to compute $T_{j}^{\mathrm{CRT}}$ , its runtime would be $\mathcal{O}(Bp^4)$ [15, pp. 93]. Moreover, CRT requires decently large $B$ to make the empirical distribution of the p-values smooth enough. Reducing the computational cost of CRT is the main motivation of several works [7, 19, 26]. One of them is the introduction of distillation-CRT (dCRT) [19]. The main appeal of this method is that it can output p-values analytically, therefore bypassing the multiple knockoffs sampling steps used to infer on each variable, and leads to a reasonable reduction of the computation cost.
|
| 67 |
+
|
| 68 |
+
Distillation operation The key addition of dCRT is the distillation operation: for each variable $j$ , we want to distill all the conditional information of the remaining variables $\mathbf{X}_{-j}$ to $\mathbf{x}_j$ and to $\mathbf{y}$ via least-squares minimization with $\ell_1$ -regularization to enforce sparsity. For each variable $j$ , we first solve the lasso problem by regressing $\mathbf{X}_{*,j}$ on $\mathbf{X}_{-j}$ ,
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\hat{\beta}^{d_{\mathbf{X}_{*,j}}} = \underset{\boldsymbol{\beta} \in \mathbb{R}^{p-1}}{\operatorname{argmin}} \; \frac{1}{2} \|\mathbf{X}_{*,j} - \mathbf{X}_{-j}\boldsymbol{\beta}\|_{2}^{2} + \lambda_{dx} \|\boldsymbol{\beta}\|_{1}. \tag{3}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
For distillation of variable $j$ and the binary response $\mathbf{y}$ with logistic relationship, [19] briefly suggested to solve a penalized estimation problem, similar to Eq. (2):
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\hat{\boldsymbol{\beta}}^{d_{y,j}} = \operatorname*{argmin}_{\boldsymbol{\beta} \in \mathbb{R}^{p-1}} -\frac{1}{n} \sum_{i=1}^{n} \left\{ \left(\mathbf{X}_{i,-j}^{\top}\boldsymbol{\beta}\right) y_{i} - \log\left[1 + \exp\left(\mathbf{X}_{i,-j}^{\top}\boldsymbol{\beta}\right)\right] \right\} + \lambda \|\boldsymbol{\beta}\|_{1}. \tag{4}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
Then, a test statistic is calculated for each $j = 1,\dots ,p$ :
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
T _ {j} = \sqrt {n} \frac {\left\langle \mathbf {y} - \mathbf {X} _ {- j} \hat {\boldsymbol {\beta}} ^ {d _ {y , j}} , \mathbf {X} _ {*, j} - \mathbf {X} _ {- j} \hat {\boldsymbol {\beta}} ^ {d _ {\mathbf {X} _ {*, j}}} \right\rangle}{\| \mathbf {y} - \mathbf {X} _ {- j} \hat {\boldsymbol {\beta}} ^ {d _ {y , j}} \| _ {2} \| \mathbf {x} _ {j} - \mathbf {X} _ {- j} \hat {\boldsymbol {\beta}} ^ {d _ {\mathbf {X} _ {*, j}}} \| _ {2}}. \tag {5}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
Intuitively, Eq. (5) is the correlation of the regression residuals computed from Eq. (3) and (4), scaled by a factor of $\sqrt{n}$ . Under the null hypothesis, and more importantly assuming a linear relationship between $\mathbf{X}_{i,*}$ and $\mathbf{y}$ , this quantity asymptotically follows a standard normal distribution, conditionally on $\mathbf{y}$ and $\mathbf{X}_{-j}$ . It then follows that we can output a two-sided p-value for each variable $j$ by taking $\widehat{p}_j = 2\left[1 - \Phi(|T_j|)\right]$ .
|
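For concreteness, once the two distillation fits are available, the dCRT statistic of Eq. (5) and its p-value can be computed as below (our sketch; `beta_dy` and `beta_dx` denote the fitted coefficient vectors of Eq. (4) and Eq. (3), respectively).

```python
import numpy as np
from scipy.stats import norm

def dcrt_statistic(y, X, j, beta_dy, beta_dx):
    """T_j of Eq. (5): sqrt(n) * correlation of the two distillation residuals."""
    n = X.shape[0]
    X_minus_j = np.delete(X, j, axis=1)
    res_y = y - X_minus_j @ beta_dy          # residual of regressing y on X_{-j}
    res_x = X[:, j] - X_minus_j @ beta_dx    # residual of regressing x_j on X_{-j}
    T = np.sqrt(n) * (res_y @ res_x) / (np.linalg.norm(res_y) * np.linalg.norm(res_x))
    p_value = 2 * (1 - norm.cdf(abs(T)))     # two-sided p-value under N(0, 1)
    return T, p_value
```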
| 87 |
+
|
| 88 |
+
However, the formulation of the test statistic in Eq. (5) is not truly satisfactory in the setting of sparse logistic regression. More specifically, neither the calculation of the regression residuals $\mathbf{y} - \mathbf{X}_{-j}\hat{\boldsymbol{\beta}}^{d_y,j}$ nor the test statistic $T_{j}$ takes into account the non-linear relationship between $\mathbf{X}$ and the binary response $\mathbf{y}$ . The first row of Figure 6 shows the qq-plot of the test statistic $T_{j}$ for logistic regression: even in the classical regime where $n > p$ , its distribution is far from standard normal.
|
| 89 |
+
|
| 90 |
+
# 3 Decorating Test-Statistics for High-Dimensional Logistic Regression
|
| 91 |
+
|
| 92 |
+
As we have elaborated, the formulation of dCRT is not well-suited for problems other than penalized least-squares regression. We therefore propose an adaptation of dCRT in the case of logistic regression, inspired by the classical work of [13] and by [21]. First, note that when testing $H_0^j: \beta_j^0 = 0$ under the case where $n > p$ , we have the classical Rao's test statistic, defined by
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
T_{j}^{\mathrm{Rao}} = \sqrt{n}\, \hat{\mathbf{I}}_{j|-j}^{-1/2}\, \nabla_{j}\ell\left(0, \hat{\boldsymbol{\beta}}_{-j}\right), \tag{6}
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
where $\nabla_{j}\ell(0,\hat{\boldsymbol{\beta}}_{-j}) \stackrel{\mathrm{def.}}{=} \nabla_{\beta_{j}}\ell(\beta_{j},\hat{\boldsymbol{\beta}}_{-j})\big|_{\beta_{j} = 0}$ is the Fisher score. Here $\hat{\boldsymbol{\beta}}_{-j} \stackrel{\mathrm{def.}}{=} \operatorname{argmin}_{\boldsymbol{\beta}_{-j} \in \mathbb{R}^{p-1}} \ell(\beta_j, \boldsymbol{\beta}_{-j})$ is the constrained maximum-likelihood estimator of $\boldsymbol{\beta}_{-j}$ with $\beta_{j}$ fixed, and $\hat{\mathbf{I}}_{j|-j}$ is a consistent estimator of the partial Fisher information $\mathbf{I}_{j|-j}$ . The term $\hat{\mathbf{I}}_{j|-j}^{-1/2}$ appears because, under the null hypothesis $H_0^j$ , we have, by [13, Chapter 9], [24],
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\sqrt {n} \nabla_ {j} \ell (0, \hat {\boldsymbol {\beta}} _ {- j}) \xrightarrow [ n \to \infty ]{(d)} \mathcal {N} (0, \mathbf {I} _ {j | - j}),
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
which makes the asymptotic distribution of $T_{j}^{\mathrm{Rao}}$ standard normal. However, in the high-dimension case, where $n < p$ , we do not reach this convergence in distribution. To see this, consider the Taylor expansion of the Fisher score of variable $j$ around any given estimator $\widetilde{\beta}_{-j}$ of the true $\beta_{-j}^{0}$ :
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\nabla_ {j} \ell (0, \widetilde {\boldsymbol {\beta}} _ {- j}) = \nabla_ {j} \ell (0, \boldsymbol {\beta} _ {- j} ^ {0}) + \nabla_ {j, - j} ^ {2} \ell (0, \boldsymbol {\beta} _ {- j} ^ {0}) (\widetilde {\boldsymbol {\beta}} _ {- j} - \boldsymbol {\beta} _ {- j} ^ {0}) + \mathcal {O} ((\widetilde {\boldsymbol {\beta}} _ {- j} - \boldsymbol {\beta} _ {- j} ^ {0}) ^ {2}) \tag {7}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
On the right-hand side, the first term converges weakly to a normal distribution due to the Central Limit Theorem, the remainder term becomes negligible using $\ell_1$ regularization to induce sparsity, but the second term does not, due to estimation bias and sparsity effect of $\widetilde{\beta}_{-j}$ [14].
|
| 111 |
+
|
| 112 |
+
Adapting distillation operation for sparse logistic regression Fortunately, Eq. (7) suggests that for each variable $j$ , we can debias the Fisher score by correcting the impact of other terms. In particular, for each variable $j$ , we replace the Fisher score by
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\nabla_ {j} \ell (0, \boldsymbol {\beta} _ {- j}) - \mathbf {I} _ {j, - j} \mathbf {I} _ {- j, - j} ^ {- 1} \nabla_ {- j} \ell (0, \boldsymbol {\beta} _ {- j}). \tag {8}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
The inversion of the large matrix $\mathbf{I}_{-j, - j} \in \mathbb{R}^{(p - 1) \times (p - 1)}$ is computationally prohibitive, but we can estimate the term $\mathbf{I}_{j, - j}\mathbf{I}_{-j, - j}^{-1}$ straightforwardly by solving
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\hat{\mathbf{w}}^{j} = \underset{\mathbf{w} \in \mathbb{R}^{p-1}}{\operatorname{argmin}} \; \frac{1}{2n} \sum_{i=1}^{n} \left[ \nabla_{j,-j}^{2}\ell_{i}(\hat{\boldsymbol{\beta}}) - \mathbf{w}^{\top} \nabla_{-j,-j}^{2}\ell_{i}(\hat{\boldsymbol{\beta}}) \right]^{2} + \lambda \|\mathbf{w}\|_{1}, \tag{9}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
for each variable $j$ , where $\hat{\beta}$ is given with Eq. (2). Moreover, since we have the closed-form of the derivatives of the logistic loss $\ell(\hat{\beta})$ , a simple derivation from Eq. (9) suggests the following $x_{j}$ -distillation operation, adapted for logistic regression:
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\hat{\boldsymbol{\beta}}^{d_{\mathbf{X}_{*,j}}} = \underset{\boldsymbol{\beta} \in \mathbb{R}^{p-1}}{\operatorname{argmin}} \; \frac{1}{n} \sum_{i=1}^{n} g^{\prime\prime}\left(\mathbf{X}_{i,*}\hat{\boldsymbol{\beta}}\right) \left(\mathbf{X}_{i,j} - \mathbf{X}_{i,-j}\boldsymbol{\beta}\right)^{2} + \lambda_{dx} \|\boldsymbol{\beta}\|_{1}, \tag{10}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where the extra term (second-order derivative of the sigmoid function) $g^{\prime \prime}(\mathbf{X}_{i,*}\hat{\boldsymbol{\beta}}) = \frac{\exp(\mathbf{X}_{i,*}\hat{\boldsymbol{\beta}})}{[1 + \exp(\mathbf{X}_{i,*}\hat{\boldsymbol{\beta}})]^2}$ appears from differentiating twice the loss function $\ell (\hat{\beta})$ , and $\hat{\beta} = \hat{\beta}_{\lambda}$ is defined in Eq. (2). On the other hand, we can obtain $\hat{\beta}_j^{d_y,j}$ from $\hat{\beta}$ by simply omitting the $j$ -th coefficient from it, i.e.
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\hat{\boldsymbol{\beta}}^{d_{y,j}} \stackrel{\mathrm{def.}}{=} \left(\hat{\boldsymbol{\beta}}_{1}, \hat{\boldsymbol{\beta}}_{2}, \dots, \hat{\boldsymbol{\beta}}_{j-1}, \hat{\boldsymbol{\beta}}_{j+1}, \dots, \hat{\boldsymbol{\beta}}_{p}\right).
|
| 134 |
+
$$
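To make the distillation step concrete, here is a minimal sketch of the $\mathbf{X}_{*,j}$-distillation of Eq. (10) written as a weighted lasso. It assumes scikit-learn's `LassoCV`; the helper name `distill_xj` and the row-rescaling trick are ours (illustration only, not the authors' implementation), and normalization constants are glossed over.

```python
import numpy as np
from sklearn.linear_model import LassoCV

def distill_xj(X, beta_hat, j, cv=5):
    """Sketch of the x_j-distillation of Eq. (10): a weighted lasso of X[:, j]
    on X[:, -j], with weights g''(X beta_hat) from the fitted logistic model.
    The weighted least squares is folded into a plain lasso by rescaling rows."""
    eta = X @ beta_hat                                 # linear predictor X_{i,*} beta_hat
    w = np.exp(eta) / (1.0 + np.exp(eta)) ** 2         # g''(X_{i,*} beta_hat)
    sw = np.sqrt(w)
    X_minus_j = np.delete(X, j, axis=1)
    # Rescaling rows by sqrt(w) makes the lasso's squared loss match the weighted loss of Eq. (10)
    lasso = LassoCV(cv=cv, fit_intercept=False).fit(sw[:, None] * X_minus_j, sw * X[:, j])
    return lasso.coef_                                 # beta_hat^{d_{X_{*,j}}}
```

Cross-validation is used here for $\lambda_{dx}$, in line with the recommendation discussed below.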
|
| 135 |
+
|
| 136 |
+
Finally, the decorrelated test score, adapted from Eq. (5) and Eq. (6), reads
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
T _ {j} ^ {\text {d e c o r r}} = - \frac {1}{\sqrt {n}} \hat {\mathbf {I}} _ {j | - j} ^ {- 1 / 2} \sum_ {i = 1} ^ {n} \left[ y _ {i} - g \left(\mathbf {X} _ {i, - j} \hat {\boldsymbol {\beta}} ^ {d _ {y, j}}\right) \right] \left[ \mathbf {X} _ {i, j} - \mathbf {X} _ {i, - j} \hat {\boldsymbol {\beta}} ^ {d _ {\mathbf {X} _ {* , j}}} \right], \tag {11}
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
where the empirical partial Fisher information is $\hat{\mathbf{I}}_{j| - j} = n^{-1}\sum_{i = 1}^{n}g^{\prime \prime}(\mathbf{X}_{i,*}\hat{\boldsymbol{\beta}})(\mathbf{X}_{i,j} - \mathbf{X}_{i, - j}\hat{\boldsymbol{\beta}}^{d_{\mathbf{X}_{*,j}}})\mathbf{X}_{i,j}$ . A summary of the full procedure, which we call CRT-logit, can be found in Algorithm 1. Notice that the runtime of CRT-logit is the same as that of dCRT, which means it is in general slower than KO and HRT. To speed up inference, we introduce a variable-screening step that eliminates potentially unimportant variables before distillation, similar to dCRT. We provide an empirical benchmark of the runtime of each method in Section 4.5.
|
| 143 |
+
|
| 144 |
+
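For illustration, the decorrelated score of Eq. (11) and the corresponding two-sided p-value of Algorithm 1 can be sketched as follows; the function name and signature are ours, and the constants follow Eq. (11) as written above.

```python
import numpy as np
from scipy.stats import norm

def decorrelated_score(X, y, j, beta_hat, beta_dx_j, beta_dy_j):
    """Sketch of Eq. (11). beta_dx_j is the x_j-distillation of Eq. (10),
    beta_dy_j is beta_hat with its j-th coefficient removed."""
    n = X.shape[0]
    X_minus_j = np.delete(X, j, axis=1)
    g = lambda t: 1.0 / (1.0 + np.exp(-t))             # logistic link
    gpp = lambda t: np.exp(t) / (1.0 + np.exp(t)) ** 2
    resid_y = y - g(X_minus_j @ beta_dy_j)             # y_i - g(X_{i,-j} beta^{d_y,j})
    resid_x = X[:, j] - X_minus_j @ beta_dx_j          # X_{i,j} - X_{i,-j} beta^{d_X,j}
    I_hat = np.mean(gpp(X @ beta_hat) * resid_x * X[:, j])  # empirical partial Fisher info
    T = -np.sum(resid_y * resid_x) / (np.sqrt(n) * np.sqrt(I_hat))
    p_value = 2.0 * (1.0 - norm.cdf(abs(T)))           # two-sided p-value, as in Algorithm 1
    return T, p_value
```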
Setting the $\ell_{1}$ -regularization parameters $\lambda$ and $\lambda_{dx}$ In general, we advise using cross-validation both to obtain $\hat{\beta}_{\lambda}$ in Eq. (2) and to set $\lambda_{dx}$ in the $\mathbf{X}_{*,j}$ -distillation operator defined by Eq. (10). This is in line with the theoretical argument for dCRT [19, Lemma 1 and Theorem 3]. However, we also observe empirically that the choice of the $\ell_{1}$ -regularization parameters in the distillation step can strongly affect which variables are selected by CRT-logit. We provide more details in the Supplementary Material, and leave further theoretical investigation of this phenomenon for future work.
|
| 145 |
+
|
| 146 |
+
# Algorithm 1: CRT-logit
|
| 147 |
+
|
| 148 |
+
1 INPUT design matrix $\mathbf{X}\in \mathbb{R}^{n\times p}$ , response $\mathbf{y}\in \{0,1\}^n$
2 OUTPUT vector of p-values $\{\widehat{p}_j\}_{j = 1}^p$
3 $\hat{\beta}_{\lambda}\gets$ solve_sparse_logistic_cv(X, y) // Using Eq. (2)
4 $\hat{S}^{\mathrm{SCREENING}}\leftarrow \{j:j\in [p],\hat{\beta}_j\neq 0\}$
5 for $j\in \hat{S}^{\mathrm{SCREENING}}$ do
6 $\quad\hat{\beta}^{d_{\mathbf{X}_{*,j}}}\gets$ solve_scaled_lasso_cv(X_{*,j}, X_{*,-j}) // Using Eq. (10)
7 $\quad\hat{\beta}^{d_{y,j}}\gets (\hat{\beta}_1,\hat{\beta}_2,\dots ,\hat{\beta}_{j - 1},\hat{\beta}_{j + 1},\dots ,\hat{\beta}_p)$
8 $\quad T_{j}^{\mathrm{decorr}}\gets$ decorrelated_test_score(j, X, y, $\hat{\beta}^{d_{\mathbf{X}_{*,j}}},\hat{\beta}^{d_{y,j}}$) // Using Eq. (11)
9 $\quad\widehat{p}_j\gets 2[1 - \Phi (|T_j^{\mathrm{decorr}}|)]$
10 end
11 for $j\notin \hat{S}^{\mathrm{SCREENING}}$ do
12 $\quad\widehat{p}_j\gets 1$
13 end

Asymptotic analysis of the Decorrelated Test Statistic We now provide a theoretical analysis of CRT-logit in the large-sample regime. All proofs can be found in the Supplementary Material. First, we introduce the following assumption.
|
| 163 |
+
|
| 164 |
+
# Assumption 3.1 (Regularity conditions). Assume that
|
| 165 |
+
|
| 166 |
+
(A1) $\lambda_{\min}(\mathbf{I}) \geq K$ for some constant $K > 0$ .
|
| 167 |
+
(A2) Sparsity of $\beta^0$ and $\mathbf{w}^{0,j}$ , with $\mathbf{w}^{0,j}$ the ground truth weights for the distillation of $\mathbf{x}_j$ in Eq. (10): $|S| = s^*$ and $\| \mathbf{w}^{0,j}\|_0 = s'$ with $s^* = o\big(n^{1/2} / \log(p)\big)$ and $s' = o\big(n^{1/2} / \log(p)\big)$ .
|
| 168 |
+
(A3) For all $i \in [n]$ , $\mathbf{X}_{i,*}$ and $(-y_i + g'(\mathbf{X}_{i,*}\beta))$ are sub-exponential random variables, and $|\mathbf{X}_{i,-j}\mathbf{w}^{0,j}| \leq K'$ almost surely, for some constant $K' > 0$ .
|
| 169 |
+
|
| 170 |
+
We then have the following result, that states that the asymptotic distribution of the decorrelated test scores is standard normal.
|
| 171 |
+
|
| 172 |
+
Theorem 3.1. Let $j \in [p]$ , and let $T_{j}^{\text{decorr}}$ be defined as in Eq. (11), with $\lambda \asymp \lambda_{dx} \asymp \sqrt{n^{-1}\log(p)}$ . Then, if Assumption 3.1 holds true, and if we consider $p = p(n)$ , under the null hypothesis $\mathcal{H}_0^j : \beta_j^0 = 0$ , we have
|
| 173 |
+
|
| 174 |
+
$$
|
| 175 |
+
\forall t \in \mathbb {R}, \quad \lim _ {n \to \infty} | \mathbb {P} _ {\beta^ {0}} (T _ {j} ^ {d e c o r r} \leq t) - \Phi (t) | = 0,
|
| 176 |
+
$$
|
| 177 |
+
|
| 178 |
+
where $\Phi(\cdot)$ is the CDF of the standard Gaussian distribution. Moreover, for each $j \in [p]$ , if we define $\widehat{p}_j \stackrel{\text{def.}}{=} 2[1 - \Phi(|T_j^{\text{decorr}}|)]$ , i.e. $\widehat{p}_j$ is the output of Algorithm 1, then, under the null hypothesis $\mathcal{H}_0^j: \beta_j^0 = 0$ , we have
|
| 179 |
+
|
| 180 |
+
$$
|
| 181 |
+
\limsup_{n\to \infty}\mathbb{P}_{\beta^{0}}(\widehat{p}_{j}\leq t)\leq t\quad \text{for all } t\in [0,1],
|
| 182 |
+
$$
|
| 183 |
+
|
| 184 |
+
that is, the $p$ -values output by Algorithm 1 are valid asymptotically.
|
| 185 |
+
|
| 186 |
+
Remark 3.1. We also show in the proof of Theorem 3.1 (in the Supplementary Material) that the rate of convergence is $\mathcal{O}(1 / \sqrt{n})$ . Compared with some related works (e.g., knockoffs) that come with finite-sample guarantees, our theoretical analysis only holds in the asymptotic regime. We leave the finite-sample analysis as a direction for future work.
|
| 187 |
+
|
| 188 |
+
FDR control with CRT-logit As a consequence of Theorem 3.1, we have the following result, which establishes that the FDR of the test is controlled when using the Benjamini-Yekutieli procedure [6] with the p-values output from Algorithm 1, assuming that the number of tests $p$ is fixed.
|
| 189 |
+
|
| 190 |
+
Corollary 3.1. Under Assumption 3.1 and the logistic model defined in Eq. (1), with $\lambda \asymp \lambda_{dx} \asymp \sqrt{n^{-1}\log(p)}$ , assume $n^{-1/2}(s' \vee s^*)\log(p) = o(1)$ , and assume the number of tests $p$ is fixed. Let $\alpha \in (0,1)$ and let $\widehat{S}_{BY-CRT}$ be given by applying the Benjamini-Yekutieli FDR-controlling procedure to the CRT-logit $p$ -values $\{\widehat{p}_j\}_{j \in [p]}$ output by Algorithm 1. Then, we have
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
\limsup_{n\to \infty}\mathbb{E}\left[\frac{\operatorname{card}(\widehat{\mathcal{S}}_{BY - CRT}\cap\mathcal{S}^{c})}{\operatorname{card}(\widehat{\mathcal{S}}_{BY - CRT})\lor 1}\right]\leq \alpha .
|
| 194 |
+
$$
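In practice, the FDR-controlling step of Corollary 3.1 is a standard Benjamini-Yekutieli correction applied to the CRT-logit p-values. A minimal sketch using statsmodels is given below; the uniform p-values are only a placeholder for the output of Algorithm 1.

```python
import numpy as np
from statsmodels.stats.multitest import multipletests

p_values = np.random.uniform(size=1000)      # placeholder for the p-values from Algorithm 1
rejected, p_adjusted, _, _ = multipletests(p_values, alpha=0.1, method="fdr_by")
selected = np.flatnonzero(rejected)          # estimated support, S_hat_{BY-CRT}
```

Replacing `method="fdr_by"` with `"fdr_bh"` gives the Benjamini-Hochberg variant used empirically in Section 4 (see Remark 4.2).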
|
| 195 |
+
|
| 196 |
+
Remark 3.2. Assumption 3.1 is also assumed in [21, 28], which also provide a detailed discussion of this regularity assumption in generalized linear models. This assumption, in turn, is built on the regularity assumption in the classic work [13, Chapter 9] to establish asymptotic normality of Rao's test statistic. Theorem 3.1 is an adaptation of [21, Theorem 3.1], specialized for the case of sparse logistic regression and the $p$ -values output from CRT-logit.
|
| 197 |
+
|
| 198 |
+
# 4 Empirical Results
|
| 199 |
+
|
| 200 |
+
We provide benchmarks of the proposed CRT-logit algorithm along with most of the other methods mentioned in the introduction, in particular: model-X Knockoffs (KO) [10], the Debiased Lasso (dLasso) [33, 16], the original CRT with 1000 samplings [10], the Holdout Randomization Test with 5000 samplings [26], and Lasso-Distillation CRT (dCRT) [19]. We did not include SLOE and CPT: the available open-source implementation of SLOE is particularly unstable and does not fit the sparse-regression setting, and no implementation of CPT is available. For lack of space, we defer an additional experiment on a genome-wide association study to the Supplementary Material.
|
| 201 |
+
|
| 202 |
+
Remark 4.1. As a slight caveat, in the simulated and semi-realistic experiment sections (Sections 4.1, 4.2 and 4.3), we introduce an additional noise term to the logistic relationship of Eq. (1):
|
| 203 |
+
|
| 204 |
+
$$
|
| 205 |
+
\mathbb {P} \left(y _ {i} = 1 \mid \mathbf {x} _ {i}\right) = g \left(\mathbf {x} _ {i} ^ {T} \boldsymbol {\beta} ^ {0} + \sigma \xi_ {i}\right), \tag {12}
|
| 206 |
+
$$
|
| 207 |
+
|
| 208 |
+
where $\xi_{i} \sim \mathcal{N}(0,1)$ is Gaussian noise and $\sigma > 0$ is the noise magnitude. The formulation in Eq. (12) has been used in previous works, e.g. [8]. There is a clear justification for it: in most of the applications we consider, data are collected with measurement errors. In brain imaging, for example, recording the brain signals of human subjects with scanners often introduces noise, caused either by the machine or by subject movement [18]. Moreover, in general, this setting corresponds to a model mis-specification, to which CRT-logit is robust under Assumption 3.1, following the same argument as in [21, Section 5].
|
| 209 |
+
|
| 210 |
+
Remark 4.2. We use the Benjamini-Hochberg step-up procedure [5] to control the FDR with the $p$ -values in all the empirical experiments of Sections 4.2 and 4.4, as we observe that the FDR is empirically controlled with this procedure, without the loss of power incurred by the more conservative BY bound.
|
| 211 |
+
|
| 212 |
+
# 4.1 Effectiveness of the decorrelation step
|
| 213 |
+
|
| 214 |
+
To show how decorrelating the test statistics helps, we set up a simulation with a matrix $\mathbf{X}$ of $p = 400$ features and vary the number of samples $n\in \{200,400,800,4000\}$ . The binary response vector $\mathbf{y}$ is created following Eq. (12), and the design matrix $\mathbf{X}$ is sampled from a multivariate normal distribution with zero mean, while the covariance matrix $\boldsymbol{\Sigma}\in \mathbb{R}^{p\times p}$ is a symmetric Toeplitz matrix, where the parameter $\rho \in (0,1)$ controls the correlation between neighboring features: the correlation decreases quickly as the distance between feature indices increases. The true signal $\beta^0$ is picked with a sparsity parameter $\kappa = s^{*} / p$ that controls the proportion of non-zero elements, all of magnitude 2.0, i.e. $\beta_{j} = 2.0$ for all $j\in S$ . For the specific purpose of this experiment, the non-zero indices of $S$ are kept fixed. The noise $\xi$ is i.i.d. normal $\mathcal{N}(0,\mathbf{Id}_n)$ with magnitude $\sigma = \| \mathbf{X}\boldsymbol{\beta}^0\|_2 / (\sqrt{n}\,\mathrm{SNR})$ , controlled by the SNR parameter. In short, the three main parameters controlling this simulation are the correlation $\rho$ , the sparsity degree $\kappa$ and the signal-to-noise ratio SNR. We randomly generate 1000 datasets and run the dCRT and CRT-logit algorithms to obtain one sample of test statistics $\{T_j\}_{j=1}^p$ and $\{T_j^{\mathrm{decorr}}\}_{j=1}^p$ . Then, we pick 1000 samples of one null test statistic $T_j$ and $T_j^{\mathrm{decorr}}$ , defined in Eq. (5) and Eq. (11), respectively, and plot the QQ-plot of their empirical quantiles versus the standard normal quantiles. From the results in Figure 2, we observe that the empirical null distribution of the test
 
 
(a) $n = 200$

 
 
(b) $n = 400$

 
 
(c) $n = 800$

 
 
(d) $n = 4000$

Figure 2: QQ-plot for one null CRT statistic for logistic regression, with a varying number of samples and a fixed number of variables $p = 400$ . The theoretical quantiles are obtained from a standard Gaussian distribution. The decorrelation step makes the empirical null distribution of the null statistics much closer to a standard Gaussian. Parameters: $\mathrm{SNR} = 3.0$ , $\rho = 0.4$ , sparsity $= 0.06$ . Upper row: distilled-CRT statistic defined by Eq. (5). Bottom row: CRT-logit, with the decorrelated test score defined by Eq. (11) (ours).

statistic is much closer to a standard normal when the decorrelation step is added. In particular, when the sample size $n$ increases to 400, the decorrelated test statistic has empirical quantiles almost in line with the theoretical quantiles of the standard normal distribution, while the dCRT test score strays away from the 45-degree line. Again, we emphasize that the normality of $T_{j}$ is crucial for the p-value calculation. This underlines the importance of the decorrelation step on $T_{j}$ .
|
| 238 |
+
|
| 239 |
+
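For reference, below is a minimal sketch of the data-generating process used in this subsection. The $\rho^{|i-j|}$ Toeplitz covariance is our concrete instantiation of the decaying-correlation design described above, and the parameter values follow the caption of Figure 2.

```python
import numpy as np
from scipy.linalg import toeplitz

rng = np.random.default_rng(0)
n, p, rho, kappa, snr = 400, 400, 0.4, 0.06, 3.0

Sigma = toeplitz(rho ** np.arange(p))                    # Toeplitz correlation between features
X = rng.multivariate_normal(np.zeros(p), Sigma, size=n)
beta0 = np.zeros(p)
support = rng.choice(p, size=int(kappa * p), replace=False)
beta0[support] = 2.0                                     # non-zero coefficients of magnitude 2.0
sigma = np.linalg.norm(X @ beta0) / (np.sqrt(n) * snr)   # noise magnitude set from the SNR
proba = 1.0 / (1.0 + np.exp(-(X @ beta0 + sigma * rng.standard_normal(n))))
y = rng.binomial(1, proba)                               # binary response following Eq. (12)
```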
# 4.2 High-dimensional scenario with varying simulation parameters
|
| 240 |
+
|
| 241 |
+
To see how each algorithm performs under different settings, we follow the same simulation scenario as in Sec. 4.1, but vary each of the three simulation parameters, while keeping the others unchanged
|
| 242 |
+
|
| 243 |
+
at default values of $\mathrm{SNR} = 2.0$ , $\rho = 0.5$ , $\kappa = 0.04$ . We target FDR control at level 0.1, using the Benjamini-Hochberg procedure. The results in Figure 3 show that CRT-logit is the most powerful method while still controlling the FDR. Moreover, in the presence of higher correlation between nearby variables ( $\rho > 0.6$ ), other methods suffer a drop in average power, but the drop is not as severe for CRT-logit. The original CRT is, in general, conservative. We believe that this is due to using only $B = 500$ samplings to generate the empirical p-values, since the average runtime of the algorithm is prohibitive for larger $B$ (which we report in Section 4.5). For HRT, the conservativeness is expected, since only half of the samples are used for the test-statistic calculation, even though the number of samplings is larger than for the original CRT ( $B = 5000$ ). We note that, perhaps surprisingly, the debiased lasso (dlasso) is the most conservative; it controls the FDR well in all settings. This might be due to the fact that dlasso also relies on the choice of the $\ell_1$ -regularization parameter $\lambda$ in the nodewise Lasso operation, similar to the $\mathbf{X}_{*,j}$ -distillation of dCRT, as noted in Section 1. What makes the difference is that, instead of using cross-validation to set $\lambda$ for each variable $j$ , a fixed value of $\lambda = 10^{-2}\lambda_{max}$ is used in the implementation of dlasso. We strongly suspect this fixed value is not optimal, which makes the procedure nearly powerless.
|
| 244 |
+
|
| 245 |
+
# 4.3 Application: large-scale analysis on brain-imaging dataset
|
| 246 |
+
|
| 247 |
+
Description The Human Connectome Project (HCP) dataset is a collection of brain-imaging data on healthy young adult subjects with ages ranging from 22 to 35. More specifically, the input $\mathbf{X}$ is a set of $2\mathrm{mm}$ statistical maps of 400 subjects across 8 cognitive tasks. These are called z-maps, as the data follow a standard normal distribution under the null hypothesis. Each task in turn features 2 different contrasts, which effectively form binary responses $\mathbf{y} \in \{0,1\}^n$ . In short, the goal of this fMRI data analysis is to identify voxels with task-related levels of activity by fitting $\mathbf{y}$ on the distributed brain signals. The setting is high-dimensional with $n = 800$ samples, corresponding to 400 subjects, while the total number of variables is $p \approx 200{,}000$ brain voxels. Following [11, 20], we use a hierarchical clustering scheme to group the variables into $C = 1000$ spatially connected clusters. We provide details of the pre-processing step in the Supplementary Material.
|
| 248 |
+
|
| 249 |
+
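As an illustration of the dimension-reduction step just described, one can rely on scikit-learn's feature agglomeration with a spatial connectivity constraint. The helper below is our sketch only; obtaining the voxel-adjacency matrix (e.g. from a brain mask with nilearn) is assumed and not shown.

```python
import numpy as np
from sklearn.cluster import FeatureAgglomeration

def cluster_voxels(X, connectivity, n_clusters=1000):
    """Group the ~200,000 voxel columns of X into C spatially connected clusters.
    `connectivity` is a sparse voxel-adjacency matrix (assumed precomputed)."""
    agglo = FeatureAgglomeration(n_clusters=n_clusters,
                                 connectivity=connectivity,
                                 linkage="ward")
    X_reduced = agglo.fit_transform(X)   # (n, C) matrix of per-cluster averages
    return X_reduced, agglo.labels_      # labels_ maps each voxel to its cluster
```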
Creating semi-realistic ground-truth and response labels Since there is no ground truth for this dataset, we create synthetic true signals by fitting the data $\mathbf{X}$ and response $\mathbf{y}$ with an $\ell_1$ -penalized logistic classifier. In other words, the estimator $\hat{\beta}^{\mathrm{logreg}}$ will serve as true regression coefficients for each task. Then, to avoid bias in simulating label $\hat{\mathbf{y}}$ , the z-maps matrix $\mathbf{X}$ of one task are used in conjunction with the discriminative pattern map $\hat{\beta}^{\mathrm{logreg}}$ from the next task in the following order: relational, gambling, emotion, social. For instance, we use $\hat{\beta}^{\mathrm{logreg}}$
|
| 250 |
+
|
| 251 |
+
of gambling with z-maps data matrix of relational, i.e. for all $i = 1,\dots ,n$ given $\mathbf{x}_{i,\mathrm{relational}}$
|
| 252 |
+
|
| 253 |
+
$$
|
| 254 |
+
\hat {y} _ {i} \sim \operatorname {B e r n} \left\{g \left(\mathbf {x} _ {i, \text {r e l a t i o n a l}} ^ {\top} \hat {\beta} _ {\text {g a m b l i n g}} ^ {\log \text {r e g}} + \sigma \xi_ {i}\right) \right\}, \tag {13}
|
| 255 |
+
$$
|
| 256 |
+
|
| 257 |
+
where $\operatorname{Bern}(a)$ denotes a Bernoulli distribution that takes value 1 with probability $a$ , $\sigma$ is a noise magnitude and $\xi_{i}$ is standard normal noise. Finally, we apply all inference algorithms on the semi-synthetic data $(\mathbf{X},\hat{\mathbf{y}})$ and evaluate their performance using the ground truth $\hat{\beta}^{\mathrm{logreg}}$ . This simulation setting is similar to [11], except that here we consider a classification problem rather than a regression problem.
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+

|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
Figure 3: FDR/average power over 100 runs of simulations across varying parameters in high-dimensional settings. Default parameters: $n = 400, p = 600$ , $\mathrm{SNR} = 2.0$ , $\rho = 0.5$ , $\kappa = 0.04$ . FDR is controlled at level $\alpha = 0.1$ . Methods: Debiased Lasso (dlasso), model-X Knockoff (KO-logit), original CRT (CRT), HRT (HRT), dCRT (dCRT), and our version of CRT (dark green line, CRT-logit).

It allows us to compute the False Discovery Rate and the average power over multiple runs of the inference procedure (across tasks).
|
| 269 |
+
|
| 270 |
+
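A minimal sketch of the label-generation mechanism of Eq. (13) is given below; the variable names are ours, and the noise magnitude $\sigma$ is passed in explicitly rather than derived from the SNR.

```python
import numpy as np

def simulate_labels(X_relational, beta_gambling, sigma, rng):
    """Draw semi-realistic labels from the z-maps of one task (relational) and the
    sparse logistic weights fitted on the next task (gambling), as in Eq. (13)."""
    g = lambda t: 1.0 / (1.0 + np.exp(-t))               # sigmoid link
    eta = X_relational @ beta_gambling
    return rng.binomial(1, g(eta + sigma * rng.standard_normal(len(eta))))
```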
Remark 4.3. The i.i.d. assumption is formally violated in this experiment, where for each subject we analyze two sample images that are not independent. Yet, this remains a short-range correlation structure, and is thus not a strong challenge to the i.i.d. assumption.
|
| 271 |
+
|
| 272 |
+
Results The results in Figure 4 show that CRT-logit achieves better recovery than KO and the original CRT/dCRT/HRT, which results in higher statistical power. This gain comes with good control of the FDR below the desired level $\alpha = 0.1$ . On a related note, the only analysis where dCRT makes more discoveries than CRT-logit is the emotion task, but at the cost of failing to control the FDR at the nominal level.
|
| 273 |
+
|
| 274 |
+

|
| 275 |
+
Figure 4: FDR/average power over 50 runs of semi-realistic experiments on four tasks of the Human Connectome Project dataset. Parameters: $n = 800$ (taken from 400 subjects), $\mathrm{SNR} = 2.0$ . Methods (clustered versions): Debiased Lasso (cdlasso), model-X Knockoff (cKO-logit), original CRT (CRT), HRT (HRT), dCRT (dCRT), and our version of CRT (dark green line, CRT-logit).
|
| 276 |
+
|
| 277 |
+
# 4.4 Application: genome-wide association study with Human Brain Cancer Dataset
|
| 278 |
+
|
| 279 |
+
Description The last experiment in our benchmark is a genome-wide association study (GWAS) on The Cancer Genome Atlas (TCGA) dataset [30, 31]. We analyze the Glioma cohort, which consists of $n = 1026$ patients across a wide age range, diagnosed with this type of brain tumor, with a total of $p = 24776$ genes in the data matrix, recorded as copy number variations (CNVs) at the gene level in log-ratio format. As with the brain-imaging inference in Section 4.3, we use clustering to reduce the dimension to $C = 1000$ clusters. However, we use a different criterion to merge variables (genes) into clusters, namely pairwise Linkage Disequilibrium, following [1, Section 4] (using the available R library). For the response, a long-term survivor (LTS) is defined as a patient who survived more than five years after diagnosis and is labeled $y = 0$ ; otherwise the patient is a short-term survivor (STS), labeled $y = 1$ . The objective is to identify significant genes that contribute to the classification of LTS/STS status. As with the Human Connectome Project dataset, there is no real ground truth for the TCGA Glioma cohort. However, we have the list of mutations and the frequency with which they were detected in the diagnosed patients. We therefore select the 1000 most frequent gene mutations appearing in this list, i.e. the ground-truth list consists of 1000 genes (variables).
|
| 280 |
+
|
| 281 |
+
Table 1: List of detected genes associated with glioma cancer from the TCGA dataset. $n = 1026$ , $p = 24776$ (clustered to $C = 1000$ ). An empty line (—) signifies no detection. Methods listed in the table are the clustered versions. Genes detected by several methods are shown in bold. Most of the detected genes appear in the database of mutations recorded in the diagnosed patients [30].
|
| 282 |
+
|
| 283 |
+
<table><tr><td>Methods</td><td>Detected Genes</td></tr><tr><td>dLasso</td><td>—</td></tr><tr><td>KO</td><td>ABCC10, ANK3, CDH23, PTEN, SPEN, SVIL, ZMIZ1</td></tr><tr><td>dCRT</td><td>ANK3, ANKRD30A, CDH23, PTEN, RET, SPEN, ZMIZ1</td></tr><tr><td>CRT-logit</td><td>ABCC10, ANKRD30A, BCOR, EPHA3, PPL, SPAG17, SPEN, SVIL, USP9X</td></tr><tr><td>Original CRT</td><td>ABCC10, BCOR, EPHA3, SPEN, SVIL</td></tr><tr><td>HRT</td><td>ABCC10, SPEN</td></tr></table>
|
| 284 |
+
|
| 285 |
+
Result Table 1 shows that CRT-logit finds the largest number of genes. Moreover, most of the selected genes in this table are present in the list of mutated genes found in the recorded patients. Some genes are detected by almost all of the benchmarked methods, most prominently SPEN, which is found in over $10\%$ of patients in the cohort. Furthermore, this gene is known to be associated not only with brain cancer, but also with other types of cancer in The Human Protein Atlas project [17]. Note that, in the absence of ground truth, this does not guarantee that all the genes found are associated with glioma, but this experiment demonstrates the applicability of CRT-logit to GWAS studies.
|
| 286 |
+
|
| 287 |
+
# 4.5 Average runtime of benchmarked methods
|
| 288 |
+
|
| 289 |
+
Table 2: Average runtime of benchmarked methods for one simulation (in seconds). Standard error is reported in parentheses.
|
| 290 |
+
|
| 291 |
+
<table><tr><td>Methods</td><td>Simulated (Sec. 4.2)</td><td>HCP-semi-real (Sec. 4.3)</td></tr><tr><td>Debiased Lasso [33, 28, 16]</td><td>61.83 (5.2)</td><td>154.27 (8.79)</td></tr><tr><td>Knockoff Filter [4, 10]</td><td>1.62 (0.02)</td><td>8.12 (0.62)</td></tr><tr><td>CRT (500 samplings) [10]</td><td>2312.91 (38.21)</td><td>7069.96 (109.09)</td></tr><tr><td>HRT (5000 samplings) [26]</td><td>14.84 (2.01)</td><td>52.17 (4.11)</td></tr><tr><td>dCRT[screening=True] [19]</td><td>16.83 (1.89)</td><td>65.18 (3.91)</td></tr><tr><td>dCRT[screening=False] [19]</td><td>370.12 (8.18)</td><td>962.40 (20.63)</td></tr><tr><td>CRT-logit[screening=True] (this work)</td><td>14.16 (0.35)</td><td>61.26 (3.55)</td></tr><tr><td>CRT-logit[screening=False] (this work)</td><td>367.91 (4.11)</td><td>983.78 (17.26)</td></tr></table>
|
| 292 |
+
|
| 293 |
+
Besides statistical performance, it is equally important to assess the computational cost of inference procedures. The average runtimes in Table 2 for the two experiments show that the original CRT is not suitable for large-scale inference: it is more than a thousand times slower than the fastest method (the Knockoff Filter), and over 150 times slower than dCRT/CRT-logit with screening. The empirical runtimes also confirm the effectiveness of the screening step performed before the distillation/decorrelation of the test statistics: it makes CRT-logit and dCRT over 20 times faster than without it. On a related note, although in theory the Debiased Lasso, dCRT and CRT-logit (the latter two without screening) share the same runtime complexity, the latter two are slower due to the use of cross-validation to estimate the sparsity hyperparameters $\lambda$ and $\lambda_{dx}$ (detailed in Section 3).
|
| 294 |
+
|
| 295 |
+
# 5 Discussion
|
| 296 |
+
|
| 297 |
+
We proposed an adaptation of the Conditional Randomization Test (CRT) for sparse logistic regression in the high-dimensional regime. A major improvement of CRT-logit, our proposed algorithm, over the original CRT comes from the decorrelation of the test statistics, which makes their null distribution closer to a standard normal. Indeed, the synthetic experiments in Figure 2 show that, in high dimension (when $0.5 \leq n / p \leq 1.0$ ), the empirical null distribution of CRT-logit's test statistic $T^{\mathrm{decorr}}$ is much closer to a standard normal than that of the original CRT test statistic. Moreover, the empirical benchmarks in Section 4 demonstrate that CRT-logit performs better than related statistical inference methods, such as the Debiased Lasso or model-X Knockoffs. In particular, CRT-logit is the most powerful method in our synthetic experiments with high-dimensional datasets in Section 4.2, while keeping the FDR controlled below the predefined level $\alpha = 0.1$ (with the slight caveat of using the BH instead of the BY procedure, as elaborated in Remark 4.2). We note that CRT-logit has some limitations. Its computational cost, while lower than that of the vanilla CRT, is still larger than that of alternative methods such as the Knockoff Filter and the Holdout Randomization Test. Moreover, tuning the $\ell_1$ -regularization parameter $\lambda_{dx}$ by cross-validation, as is often done, can further increase the computational cost of CRT-logit (and dCRT). Despite this, our empirical benchmarks on both simulated and real data show real promise for CRT-logit. Hence, we believe CRT-logit is competitive for practical settings that involve structured data, such as brain-imaging and genomics applications.
|
| 298 |
+
|
| 299 |
+
# Acknowledgements
|
| 300 |
+
|
| 301 |
+
BN, BT and SA acknowledge the support of the French "Agence Nationale de la Recherche" under the projects ANR-17-CE23-0011 (FastBig) and ANR-20-CHIA-0025-01 (KARAIB AI chair). SA was also supported by the Institut Universitaire de France (IUF), and BN was also supported by the Research Chair DSAIDIS (Data Science and Artificial Intelligence for Digitalized Industry and Services) of Telecom Paris.
|
| 302 |
+
|
| 303 |
+
# References
|
| 304 |
+
|
| 305 |
+
[1] Christophe Ambroise, Alia Dehman, Pierre Neuvial, Guillem Rigaill, and Nathalie Vialaneix. Adjacency-constrained hierarchical clustering of a band similarity matrix with application to genomics. Algorithms for Molecular Biology, 14(1):1-14, 2019.
|
| 306 |
+
[2] Francis Bach. Self-concordant analysis for logistic regression. Electronic Journal of Statistics, 4(none), January 2010.
|
| 307 |
+
[3] Francis Bach, Rodolphe Jenatton, Julien Mairal, Guillaume Obozinski, et al. Optimization with sparsity-inducing penalties. Foundations and Trends® in Machine Learning, 4(1):1-106, 2012.
|
| 308 |
+
[4] Rina Foygel Barber and Emmanuel J. Candès. Controlling the false discovery rate via knockoffs. The Annals of Statistics, 43(5):2055–2085, October 2015. arXiv: 1404.5609.
|
| 309 |
+
[5] Yoav Benjamini and Yosef Hochberg. Controlling the False Discovery Rate: A Practical and Powerful Approach to Multiple Testing. Journal of the Royal Statistical Society. Series B (Methodological), 57(1):289-300, 1995.
|
| 310 |
+
[6] Yoav Benjamini and Daniel Yekutieli. The control of the false discovery rate in multiple testing under dependency. Ann. Statist., 29(4):1165-1188, 08 2001.
|
| 311 |
+
[7] Thomas B. Berrett, Yi Wang, Rina Foygel Barber, and Richard J. Samworth. The conditional permutation test for independence while controlling for confounders. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 82(1):175-197, 2020. _eprint: https://rss.onlinelibrary.wiley.com/doi/pdf/10.1111/rssb.12340.
|
| 312 |
+
[8] Danilo Bzdok, Michael Eickenberg, Olivier Grisel, Bertrand Thirion, and Gael Varoquaux. Semi-Supervised Factored Logistic Regression for High-Dimensional Neuroimaging Data. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015.
|
| 313 |
+
[9] Peter Buhlmann, Philipp Rütimann, Sara van de Geer, and Cun-Hui Zhang. Correlated variables in regression: clustering and sparse estimation. Journal of Statistical Planning and Inference, 143(11):1835-1858, November 2013. arXiv: 1209.5908.
|
| 314 |
+
[10] Emmanuel Candès, Yingying Fan, Lucas Janson, and Jinchi Lv. Panning for gold: 'model-x' knockoffs for high dimensional controlled variable selection. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 80(3):551-577, 2018.
|
| 315 |
+
[11] Jérôme-Alexis Chevalier, Tuan-Binh Nguyen, Joseph Salmon, Gáel Varoquaux, and Bertrand Thirion. Decoding with confidence: Statistical control on decoder maps. NeuroImage, 234:117921, 2021.
|
| 316 |
+
[12] Jérôme-Alexis Chevalier, Tuan-Binh Nguyen, Bertrand Thirion, and Joseph Salmon. Spatially relaxed inference on high-dimensional linear models. arXiv:2106.02590 [math, stat], June 2021. arXiv: 2106.02590.
|
| 317 |
+
[13] David Roxbee Cox and David Victor Hinkley. Theoretical statistics. CRC Press, 1979.
|
| 318 |
+
[14] Wenjiang Fu and Keith Knight. Asymptotics for lasso-type estimators. The Annals of statistics, 28(5):1356-1378, 2000.
|
| 319 |
+
[15] Trevor Hastie, Robert Tibshirani, and Jerome Friedman. The elements of statistical learning: data mining, inference and prediction. 2009. OCLC: 1058138445.
|
| 320 |
+
[16] Adel Javanmard and Andrea Montanari. Confidence intervals and hypothesis testing for high-dimensional regression. The Journal of Machine Learning Research, 15(1):2869-2909, 2014.
|
| 321 |
+
[17] Stéphanie Légaré, Luca Cavallone, Aline Mamo, Catherine Chabot, Isabelle Sirois, Anthony Magliocco, Alexander Klimowicz, Patricia N Tonin, Marguerite Buchanan, Dana Keilty, et al. The estrogen receptor cofactor spen functions as a tumor suppressor and candidate biomarker of drug responsiveness in hormone-dependent breast cancers. Cancer research, 75(20):4351-4363, 2015.
|
| 322 |
+
|
| 323 |
+
[18] Martin A. Lindquist. The Statistical Analysis of fMRI Data. Statistical Science, 23(4), November 2008.
|
| 324 |
+
[19] Molei Liu, Eugene Katsevich, Lucas Janson, and Aaditya Ramdas. Fast and powerful conditional randomization testing via distillation. Biometrika, 07 2021.
|
| 325 |
+
[20] Tuan-Binh Nguyen, Jérôme-Alexis Chevalier, and Bertrand Thirion. Ecko: Ensemble of clustered knockoffs for robust multivariate inference on fmri data. In International Conference on Information Processing in Medical Imaging, pages 454-466. Springer, 2019.
|
| 326 |
+
[21] Yang Ning and Han Liu. A general theory of hypothesis tests and confidence regions for sparse high dimensional models. Annals of Statistics, 45(1):158-195, February 2017. Publisher: Institute of Mathematical Statistics.
|
| 327 |
+
[22] Dmitrii M Ostrovskii and Francis Bach. Finite-sample analysis of $m$ -estimators using self-concordance. Electronic Journal of Statistics, 15(1):326-391, 2021.
|
| 328 |
+
[23] Fabian Pedregosa, Gáel Varoquaux, Alexandre Gramfort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, et al. Scikit-learn: Machine learning in python. Journal of machine learning research, 12(Oct):2825-2830, 2011.
|
| 329 |
+
[24] C Radhakrishna Rao. Large sample tests of statistical hypotheses concerning several parameters with applications to problems of estimation. In Mathematical Proceedings of the Cambridge Philosophical Society, volume 44, pages 50-57. Cambridge University Press, 1948.
|
| 330 |
+
[25] Pragya Sur and Emmanuel J. Candès. A modern maximum-likelihood theory for high-dimensional logistic regression. Proceedings of the National Academy of Sciences, 116(29):14516–14525, July 2019. Publisher: Proceedings of the National Academy of Sciences.
|
| 331 |
+
[26] Wesley Tansey, Victor Veitch, Haoran Zhang, Raul Rabadan, and David M. Blei. The Holdout Randomization Test for Feature Selection in Black Box Models. Journal of Computational and Graphical Statistics, 31(1):151-162, 2022. Publisher: Taylor & Francis _eprint: https://doi.org/10.1080/10618600.2021.1923520.
|
| 332 |
+
[27] Bertrand Thirion, Gael Varoquaux, Elvis Dohmatob, and Jean-Baptiste Poline. Which fMRI clustering gives good brain parcellations? Frontiers in Neuroscience, 8, 2014.
|
| 333 |
+
[28] Sara van de Geer, Peter Buhlmann, Ya'acov Ritov, and Ruben Dezeure. On asymptotically optimal confidence regions and tests for high-dimensional models. The Annals of Statistics, 42(3):1166-1202, June 2014. arXiv: 1303.0518.
|
| 334 |
+
[29] Gael Varoquaux, Alexandre Gramfort, and Bertrand Thirion. Small-sample brain mapping: sparse recovery on spatially correlated designs with randomization and clustering. Proceedings of the 29th International Conference on Machine Learning, Edinburgh, Scotland, UK, 2012, page 8, 2012.
|
| 335 |
+
[30] Suhas V Vasaikar, Peter Straub, Jing Wang, and Bing Zhang. LinkedOmics: analyzing multi-omics data within and across 32 cancer types. *Nucleic Acids Research*, 46(D1):D956–D963, January 2018.
|
| 336 |
+
[31] John N Weinstein, Eric A Collisson, Gordon B Mills, Kenna R Mills Shaw, Brad A Ozenberger, Kyle Ellrott, Ilya Shmulevich, Chris Sander, and Joshua M Stuart. The cancer genome atlas pan-cancer analysis project. Nature genetics, 45(10):1113-1120, 2013.
|
| 337 |
+
[32] Steve Yadlowsky, Taedong Yun, Cory Y McLean, and Alexander D' Amour. SLOE: A Faster Method for Statistical Inference in High-Dimensional Logistic Regression. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P. S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 29517-29528. Curran Associates, Inc., 2021.
|
| 338 |
+
[33] Cun-Hui Zhang and Stephanie S. Zhang. Confidence intervals for low dimensional parameters in high dimensional linear models. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 76(1):217-242, 2014. _eprint: https://rss.onlinelibrary.wiley.com/doi/pdf/10.1111/rssb.12026.
|
| 339 |
+
|
| 340 |
+
[34] Qian Zhao, Pragya Sur, and Emmanuel J. Candès. The asymptotic distribution of the MLE in high-dimensional logistic models: Arbitrary covariance. Bernoulli, 28(3):1835 - 1861, 2022. Publisher: Bernoulli Society for Mathematical Statistics and Probability.
|
| 341 |
+
|
| 342 |
+
# Checklist
|
| 343 |
+
|
| 344 |
+
1. For all authors...
|
| 345 |
+
|
| 346 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 347 |
+
(b) Did you describe the limitations of your work? [Yes]
|
| 348 |
+
(c) Did you discuss any potential negative societal impacts of your work? [No] We believe our work provides no potential negative societal impacts.
|
| 349 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 350 |
+
|
| 351 |
+
2. If you are including theoretical results...
|
| 352 |
+
|
| 353 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes]
|
| 354 |
+
(b) Did you include complete proofs of all theoretical results? [Yes] The proofs can be found in Supplementary Material
|
| 355 |
+
|
| 356 |
+
3. If you ran experiments...
|
| 357 |
+
|
| 358 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] Except for datasets of realistic applications, which are not included, but references are provided
|
| 359 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] Both in main text and in Supplementary Material
|
| 360 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] With the simulated and semi-realistic experiment
|
| 361 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No]
|
| 362 |
+
|
| 363 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 364 |
+
|
| 365 |
+
(a) If your work uses existing assets, did you cite the creators? [Yes]
|
| 366 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 367 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [No]
|
| 368 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 369 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 370 |
+
|
| 371 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 372 |
+
|
| 373 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 374 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 375 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
aconditionalrandomizationtestforsparselogisticregressioninhighdimension/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f65580e4a796dc9bbabb3d7fd69dfe530d9744d318cc3f3c067cffdd7870f1a
+size 414971

aconditionalrandomizationtestforsparselogisticregressioninhighdimension/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92d10830bdc5aeca05a5ec7b131a546b21ea4f4d69ffc360509748d8ef4e7b63
+size 597793

aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60061152157f70eeba7cb10ad913b0e759bc2267c15dacb174b6d89d048ac639
+size 85357

aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1bb4ae857180ae9a6a287fb7ba391edca1147518df0e151fd29d046ba5a89cee
+size 110608

aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/b31aa3fd-b339-43fb-b204-ff8038c186e3_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e07f4bcb6cf14d64ba223e18d3d04f3d023572a13c386c09f32d5e847e3b6d9e
+size 598700

aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/full.md ADDED
@@ -0,0 +1,348 @@
# A Consistent and Differentiable $L_{p}$ Canonical Calibration Error Estimator
|
| 2 |
+
|
| 3 |
+
Teodora Popordanoska*
|
| 4 |
+
|
| 5 |
+
ESAT-PSI, KU Leuven
|
| 6 |
+
|
| 7 |
+
teodora.popordanoska@kuleuven.be
|
| 8 |
+
|
| 9 |
+
Raphael Sayer*†
|
| 10 |
+
|
| 11 |
+
University of Tübingen
|
| 12 |
+
|
| 13 |
+
raphael.sayer@uni-tuebingen.de
|
| 14 |
+
|
| 15 |
+
Matthew B. Blaschko
|
| 16 |
+
|
| 17 |
+
ESAT-PSI, KU Leuven
|
| 18 |
+
|
| 19 |
+
matthew.blaschko@esat.kuleuven.be
|
| 20 |
+
|
| 21 |
+
# Abstract
|
| 22 |
+
|
| 23 |
+
Calibrated probabilistic classifiers are models whose predicted probabilities can directly be interpreted as uncertainty estimates. It has been shown recently that deep neural networks are poorly calibrated and tend to output overconfident predictions. As a remedy, we propose a low-bias, trainable calibration error estimator based on Dirichlet kernel density estimates, which asymptotically converges to the true $L_{p}$ calibration error. This novel estimator enables us to tackle the strongest notion of multiclass calibration, called canonical (or distribution) calibration, while other common calibration methods are tractable only for top-label and marginal calibration. The computational complexity of our estimator is $\mathcal{O}(n^2)$ , the convergence rate is $\mathcal{O}(n^{-1/2})$ , and it is unbiased up to $\mathcal{O}(n^{-2})$ , achieved by a geometric series debiasing scheme. In practice, this means that the estimator can be applied to small subsets of data, enabling efficient estimation and mini-batch updates. The proposed method has a natural choice of kernel, and can be used to generate consistent estimates of other quantities based on conditional expectation, such as the sharpness of a probabilistic classifier. Empirical results validate the correctness of our estimator, and demonstrate its utility in canonical calibration error estimation and calibration error regularized risk minimization.
|
| 24 |
+
|
| 25 |
+
# 1 Introduction
|
| 26 |
+
|
| 27 |
+
Deep neural networks have shown tremendous success in classification tasks, being regularly the best performing models in terms of accuracy. However, they are also known to make overconfident predictions [Guo et al., 2017], which is particularly problematic in safety-critical applications, such as medical diagnosis [Esteva et al., 2017, 2019] or autonomous driving [Caesar et al., 2020, Sun et al., 2020]. In many real world applications it is not only the predictive performance that is important, but also the trustworthiness of the prediction, i.e., we are interested in accurate predictions with robust uncertainty estimates. To this end, it is necessary that the models are uncertainty calibrated, which means that, for instance, among all cells that have been predicted with a probability of 0.8 to be cancerous, $80\%$ should indeed belong to a malignant tumor.
|
| 28 |
+
|
| 29 |
+
The field of uncertainty calibration has mostly focused on binary problems, often considering only the confidence score of the predicted class. However, this so-called top-label (or confidence) calibration [Guo et al., 2017] is often not sufficient in multiclass settings. A stronger notion of calibration is marginal (or class-wise) calibration [Kull et al., 2019], which splits up the multiclass problem
|
| 30 |
+
|
| 31 |
+
Table 1: Properties of ${ECE}^{KDE}$ and other commonly used calibration error estimators.
|
| 32 |
+
|
| 33 |
+
<table><tr><td></td><td colspan="4">Properties</td></tr><tr><td></td><td>Consistency</td><td>Scalability</td><td>De-biased</td><td>Differentiable</td></tr><tr><td>ECEKDE (Our)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>ECEbin</td><td>✗ [Vaicenavicius et al., 2019]</td><td>✗</td><td>✓ [Roelofs et al., 2022]</td><td>✗</td></tr><tr><td>Mix-n-Match</td><td>✓ [Zhang et al., 2020]</td><td>✗</td><td>✗</td><td>✓</td></tr><tr><td>MMCE</td><td>✗ [Kumar et al., 2018]</td><td>✓</td><td>✗</td><td>✓</td></tr></table>
|
| 34 |
+
|
| 35 |
+
into $K$ one-vs-all binary ones, and requires each to be calibrated according to the definition of binary calibration. The most strict notion of calibration, called canonical (or distribution) calibration [Brocker, 2009, Kull and Flach, 2015, Vaicenavicius et al., 2019], requires the whole probability vector to be calibrated. The curse of dimensionality makes estimation of this form of calibration difficult, and current estimators, such as the binned estimator $ECE^{bin}$ [Naeini et al., 2015], MMCE [Kumar et al., 2018] and Mix-n-Match [Zhang et al., 2020], have computational or statistical limitations that prevent them from being successfully applied in this important setting. Specifically, the binned estimator is sensitive to the binning scheme and is asymptotically inconsistent in many situations [Vaicenavicius et al., 2019], MMCE is not a consistent estimator of $L_{p}$ calibration error, and Mix-n-Match, although consistent, is intractable in high dimensions and the authors did not implement it in more than one dimension.
|
| 36 |
+
|
| 37 |
+
We propose a tractable, differentiable, and consistent estimator of the expected $L_{p}$ canonical calibration error. In particular, we use kernel density estimates (KDEs) with a Beta kernel in binary classification tasks and a Dirichlet kernel in the multiclass setting, as these kernels are the natural choices to model densities over a probability simplex. In Table 1, we summarize and compare the properties of our $ECE^{KDE}$ estimator and other commonly used estimators. $ECE^{KDE}$ scales well to higher dimensions and it is able to capture canonical calibration with $\mathcal{O}(n^2)$ complexity.
|
| 38 |
+
|
| 39 |
+
Our contributions can be summarized as follows: 1. We develop a tractable estimator of canonical $L_{p}$ calibration error that is consistent and differentiable. 2. We demonstrate a natural choice of kernel. Due to the scaling properties of Dirichlet kernel density estimation, evaluating canonical calibration becomes feasible in cases that cannot be estimated using other methods. 3. We provide a second order debiasing scheme to further improve the convergence of the estimator. 4. We empirically evaluate the correctness of our estimator and demonstrate its utility in the task of calibration regularized risk minimization on various network architectures and several datasets.
|
| 40 |
+
|
| 41 |
+
# 2 Related Work
|
| 42 |
+
|
| 43 |
+
Calibration of probabilistic predictors has long been studied in many fields. This topic gained attention in the deep learning community since Guo et al. [2017] observed that modern neural networks are poorly calibrated and tend to give overconfident predictions due to overfitting on the NLL loss. The surge of interest resulted in many calibration strategies that can be split in two general categories, which we discuss subsequently.
|
| 44 |
+
|
| 45 |
+
Post-hoc calibration strategies learn a calibration map of the predictions from a trained predictor in a post-hoc manner, using a held-out calibration set. For instance, Platt scaling [Platt, 1999] fits a logistic regression model on top of the logit outputs of the model. A special case of Platt scaling that fits a single scalar, called temperature, has been popularized by Guo et al. [2017] as an accuracy-preserving, easy to implement and effective method to improve calibration. However, it has the undesired consequence that it clamps the high confidence scores of accurate predictions [Kumar et al., 2018]. Similar approaches for post-hoc calibration include histogram binning [Zadrozny and Elkan, 2001], isotonic regression [Zadrozny and Elkan, 2002], Bayesian binning into quantiles [Naeini and Cooper, 2016], Beta [Kull et al., 2017] and Dirichlet calibration [Kull et al., 2019]. Recently, Gupta et al. [2021] proposed a binning-free calibration measure based on the Kolmogorov-Smirnov test. In this approach, the recalibration function is obtained via spline-fitting, rather than minimizing a loss function on a calibration set. Ma et al. [2021] integrate ensemble-based and post-hoc calibration methods in an accuracy-preserving truth discovery framework. Zhao et al. [2021] introduce a new notion of calibration, called decision calibration; however, they do not propose an estimator of calibration error with statistical guarantees.
|
| 46 |
+
|
| 47 |
+
Trainable calibration strategies integrate a differentiable calibration measure into the training objective. One of the earliest approaches is regularization by penalizing low entropy predictions [Pereyra et al., 2017]. Similarly to temperature scaling, it has been shown that entropy regularization needlessly suppresses high confidence scores of correct predictions [Kumar et al., 2018]. Another popular strategy is MMCE (Maximum Mean Calibration Error) [Kumar et al., 2018], where the entropy regularizer is replaced by a kernel-based surrogate for the calibration error that can be optimized alongside NLL. It has been shown that label smoothing [Szegedy et al., 2016, Müller et al., 2019], i.e. training models with a weighted mixture of the labels instead of one-hot vectors, also improves model calibration. Liang et al. [2020] propose to add the difference between predicted confidence and accuracy as auxiliary term to the cross-entropy loss. Focal loss [Mukhoti et al., 2020, Lin et al., 2017] has recently been empirically shown to produce better calibrated models than many of the alternatives, but does not estimate a clear quantity related to calibration error. Bohdal et al. [2021] derive a differentiable approximation to the commonly-used binned estimator of calibration error by computing differentiable approximations to the 0/1 loss and the binning operator. However, this approach does not eliminate the dependence on the binning scheme and it is not clear how it can be extended to calibration of the whole probability vector.
|
| 48 |
+
|
| 49 |
+
Kernel density estimation [Parzen, 1962, Rosenblatt, 1956, Silverman, 1986] is a non-parametric method to estimate a probability density function from a finite sample. Zhang et al. [2020] propose a KDE-based estimator of the calibration error (Mix-n-Match) for measuring calibration performance. Although they demonstrate consistency of the method, it requires a numerical integration step that is infeasible in high dimensions. In practice, they only implemented binary calibration, and not canonical calibration.
|
| 50 |
+
|
| 51 |
+
Although many calibration strategies have been empirically shown to decrease the calibration error, very few of them are based on an estimator of miscalibration. Our estimator is the first consistent, differentiable estimator with favourable scaling properties that has been successfully applied to the estimation of $L_{p}$ canonical calibration error in the multi-class setting.
|
| 52 |
+
|
| 53 |
+
# 3 Methods
|
| 54 |
+
|
| 55 |
+
We study a classical supervised classification problem. Let $(\Omega, \mathcal{A}, \mathbb{P})$ be a probability space, where $\Omega$ is the set of possible outcomes, $\mathcal{A} = \mathcal{A}(\Omega)$ is the sigma field of events and $\mathbb{P}: \mathcal{A} \to [0,1]$ is a probability measure, let $\mathcal{X} = \mathbb{R}^d$ and $\mathcal{Y} = \{1,\dots,K\}$ . Let $x: \Omega \to \mathcal{X}$ and $y: \Omega \to \mathcal{Y}$ be random variables, while realizations are denoted with subscripts. Suppose we have a model $f: \mathcal{X} \to \triangle^K$ , where $\triangle^K$ denotes the $K - 1$ dimensional simplex as obtained, e.g., from the output of a final softmax layer in a neural network. We measure the (mis-)calibration of the model in terms of $L_p$ calibration error, defined below.
|
| 56 |
+
|
| 57 |
+
Definition 3.1 (Calibration error, [Naeini et al., 2015, Kumar et al., 2019, Wenger et al., 2020]). The $L_{p}$ calibration error of $f$ is:
|
| 58 |
+
|
| 59 |
+
$$
|
| 60 |
+
\mathrm {C E} _ {p} (f) = \left(\mathbb {E} \left[ \left\| \mathbb {E} [ y \mid f (x) ] - f (x) \right\| _ {p} ^ {p} \right]\right) ^ {\frac {1}{p}}. \tag {1}
|
| 61 |
+
$$
|
| 62 |
+
|
| 63 |
+
We note that we consider multiclass calibration, and that $f(x)$ and the conditional expectation in Equation (1) therefore map to points on a probability simplex. We say that a classifier $f$ is perfectly calibrated if $\mathrm{CE}_p(f) = 0$ .
|
| 64 |
+
|
| 65 |
+
In order to empirically compute the conditional expectation in Equation (1), we need to perform density estimation over the probability simplex. In a binary setting, this has traditionally been done with binned estimates [Naeini et al., 2015, Guo et al., 2017, Kumar et al., 2019]. However, this is not differentiable w.r.t. the function $f$ , and cannot be incorporated into a gradient-based training procedure. Furthermore, binned estimates suffer from the curse of dimensionality and do not have a practical extension to multiclass settings. We consider an estimator for the $\mathrm{CE}_p$ based on Beta and Dirichlet kernel density estimates in the binary and multiclass setting, respectively. We require that this estimator is consistent and differentiable, such that we can train it in a calibration error regularized risk minimization framework. This estimator is given by:
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
\widehat {\operatorname {C E} _ {p} (f) ^ {p}} = \frac {1}{n} \sum_ {j = 1} ^ {n} \left[ \left. \left\| \mathbb {E} [ y \mid f (x) ] \right| _ {f \left(x _ {j}\right)} - f \left(x _ {j}\right) \right\| _ {p} ^ {p} \right], \tag {2}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
where $\mathbb{E}[y\mid f(x)]\big|_{f(x_j)}$ denotes $\mathbb{E}[y\mid f(x)]$ evaluated at $f(x) = f(x_j)$ . Let $p_{x,y}(x_i,y_i) = p_{y|x = x_i}(y_i)p_x(x_i)$ be the joint density. Then we define the estimator of the conditional expectation as follows:
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\mathbb {E} [ y \mid f (x) ] = \sum_ {y _ {k} \in \mathcal {Y}} y _ {k} p _ {y \mid f (x)} (y _ {k}) = \frac {\sum_ {y _ {k} \in \mathcal {Y}} y _ {k} p _ {f (x) , y} (f (x) , y _ {k})}{p _ {f (x)} (f (x))} \approx \frac {\sum_ {i = 1} ^ {n} k \left(f (x) ; f \left(x _ {i}\right)\right) y _ {i}}{\sum_ {i = 1} ^ {n} k \left(f (x) ; f \left(x _ {i}\right)\right)} =: \mathbb {E} [ \widehat {y \mid f (x)} ] \tag {3}
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
where $k$ is the kernel of a kernel density estimate evaluated at point $x$ and $p_{f(x)}$ is uniquely determined by $p_x$ and $f$ .
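As an illustration of Eq. (3), the conditional expectation can be estimated with a Dirichlet kernel centred at each prediction. The sketch below uses scipy's Dirichlet density; it is our illustration and deliberately unoptimized (it materializes the full $n \times n$ kernel matrix with Python loops), but it matches the $\mathcal{O}(n^2)$ complexity discussed later.

```python
import numpy as np
from scipy.stats import dirichlet

def conditional_expectation(F, Y, h):
    """F: (n, K) softmax outputs on the simplex, Y: (n, K) one-hot labels, h: bandwidth.
    Returns the (n, K) matrix of estimates of E[y | f(x)] evaluated at each F[j]."""
    n = F.shape[0]
    # K_mat[j, i] = Dirichlet kernel centred at F[i] (alpha = F[i]/h + 1) evaluated at F[j]
    K_mat = np.array([[dirichlet.pdf(F[j], F[i] / h + 1.0) for i in range(n)]
                      for j in range(n)])
    return (K_mat @ Y) / K_mat.sum(axis=1, keepdims=True)

# Plugging this into Eq. (2): ECE_p^p is approximately
# np.mean(np.sum(np.abs(conditional_expectation(F, Y, h) - F) ** p, axis=1)).
```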
|
| 78 |
+
|
| 79 |
+
Proposition 3.2. Assuming that $p_{f(x)}(f(x))$ is Lipschitz continuous over the interior of the simplex, there exists a kernel $k$ such that $\mathbb{E}[\widehat{y \mid f(x)}]$ is a pointwise consistent estimator of $\mathbb{E}[y \mid f(x)]$ , that is:
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
\operatorname * {p l i m} _ {n \rightarrow \infty} \frac {\sum_ {i = 1} ^ {n} k (f (x) ; f (x _ {i})) y _ {i}}{\sum_ {i = 1} ^ {n} k (f (x) ; f (x _ {i}))} = \frac {\sum_ {y _ {k} \in \mathcal {Y}} y _ {k} p _ {f (x) , y} (f (x) , y _ {k})}{p _ {f (x)} (f (x))}. \tag {4}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
Proof. Let $k$ be a Dirichlet kernel [Ouimet and Tolosana-Delgado, 2022]. By the consistency of the Dirichlet kernel density estimators [Ouimet and Tolosana-Delgado, 2022, Theorem 4] Lipschitz continuity of the density over the simplex is a sufficient condition for uniform convergence of the kernel density estimate. This in turn implies that for a given $f$ , for all $f(x) \in (0,1)$ , $\frac{1}{n} \sum_{i=1}^{n} k(f(x); f(x_i)) y_i \xrightarrow{p} \sum_{y_k \in \mathcal{Y}} y_k p_{f(x),y}(f(x),y_k)$ and $\frac{1}{n} \sum_{i=1}^{n} k(f(x); f(x_i)) \xrightarrow{p} p_{f(x)}(f(x))$ . Let $g(x) = 1/x$ , then the set of discontinuities of $g$ applied to the denominator of the l.h.s. of (4) has measure zero since $\frac{1}{n} \sum_{i=1}^{n} k(f(x); f(x_i)) = 0$ with probability zero. From the continuous mapping theorem [Mann and Wald, 1943], it follows that $n / (\sum_{i=1}^{n} k(f(x); f(x_i))) \xrightarrow{p} 1 / p_{f(x)}(f(x))$ . Since products of convergent (in probability) sequences of random variables converge in probability to the product of their limits [Resnick, 2019], we have that $\sum_{i=1}^{n} k(f(x); f(x_i)) y_i g(\sum_{i=1}^{n} k(f(x); f(x_i))) \xrightarrow{p} \sum_{y_k \in \mathcal{Y}} y_k p_{f(x),y}(f(x),y_k) g(p_{f(x)}(f(x))),$ which is equal to the r.h.s. of (4).
The most commonly used loss functions are designed to achieve consistency in the sense of Bayes optimality under risk minimization; however, they do not guarantee calibration, neither for finite samples nor in the asymptotic limit. Since we are interested in models $f$ that are both accurate and calibrated, we consider the following optimization problem bounding the calibration error $\mathrm{CE}(f)$: $f = \arg \min_{f\in \mathcal{F}}\operatorname{Risk}(f)$, s.t. $\operatorname{CE}(f)\leq B$ for some $B > 0$, and its associated Lagrangian:
$$
f = \arg\min_{f \in \mathcal{F}} \left( \operatorname{Risk}(f) + \lambda \cdot \operatorname{CE}(f) \right). \tag{5}
$$
Mean squared error in binary classification As a first instantiation of this framework we consider a binary classification setting, with mean squared error $\mathrm{MSE}(f) = \mathbb{E}[(f(x) - y)^2]$ as the risk function, jointly optimized with the $L_{2}$ calibration error $\mathrm{CE}_2$ :
$$
f = \arg\min_{f \in \mathcal{F}} \left( \operatorname{MSE}(f) + \lambda \, \operatorname{CE}_2(f)^2 \right) = \arg\min_{f \in \mathcal{F}} \left( \operatorname{MSE}(f) + \gamma \, \mathbb{E}\left[ \mathbb{E}[y \mid f(x)]^2 \right] \right) \tag{6}
$$
where $\gamma = \frac{\lambda}{\lambda + 1} \in [0,1)$ . The full derivation using the MSE decomposition [Murphy, 1973, Degroot and Fienberg, 1983, Kuleshov and Liang, 2015, Nguyen and O'Connor, 2015] is given in Appendix A. For optimization we wish to find an estimator for $\mathbb{E}[\mathbb{E}[y \mid f(x)]^2]$ . Building upon Equation (3), a partially debiased estimator can be written as:
$$
\mathbb{E}\left[ \widetilde{\mathbb{E}[y \mid f(x)]^2} \right] \approx \frac{1}{n} \sum_{j=1}^{n} \frac{\left( \sum_{i \neq j} k(f(x_j); f(x_i)) \, y_i \right)^2 - \sum_{i \neq j} \left( k(f(x_j); f(x_i)) \, y_i \right)^2}{\left( \sum_{i \neq j} k(f(x_j); f(x_i)) \right)^2 - \sum_{i \neq j} \left( k(f(x_j); f(x_i)) \right)^2}. \tag{7}
$$
Thus, the conditional expectation is estimated using a ratio of unbiased estimators of the square of a mean.
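A minimal NumPy sketch of the partially debiased ratio in Equation (7) for the binary case is given below; the kernel callable (e.g. a Beta kernel with a fixed bandwidth) and the function name are our own choices, not the authors' released implementation.

```python
import numpy as np

def sharpness_term_debiased(f_x, y, kernel):
    """Partially debiased estimate of E[E[y | f(x)]^2] from Eq. (7), binary case.

    f_x, y : arrays of shape (n,) with scores f(x_i) in (0, 1) and labels in {0, 1}
    kernel : callable kernel(a, b) -> weight, e.g. a Beta kernel with fixed bandwidth
    """
    n = len(f_x)
    y = np.asarray(y, dtype=float)
    W = np.array([[kernel(f_x[j], f_x[i]) for i in range(n)] for j in range(n)])
    np.fill_diagonal(W, 0.0)                    # the sums in Eq. (7) exclude i = j
    Wy = W * y[None, :]
    num = Wy.sum(axis=1) ** 2 - (Wy ** 2).sum(axis=1)
    den = W.sum(axis=1) ** 2 - (W ** 2).sum(axis=1)
    return np.mean(num / den)
```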
Proposition 3.3. Equation (7) is a ratio of two $U$ -statistics and has a bias converging as $\mathcal{O}\left(\frac{1}{n}\right)$ .
The proof is given in Appendix B.
Proposition 3.4. There exist de-biasing schemes for the ratios in Equation (7) and Equation (3) that achieve an improved $\mathcal{O}\left(\frac{1}{n^2}\right)$ convergence of the bias.
The proofs are given in Appendices C and D.
In a binary setting, the kernels $k(\cdot ,\cdot)$ are Beta distributions defined as:
$$
k_{\mathrm{B}}\left( f(x_j), f(x_i) \right) := f(x_j)^{\alpha_i - 1} \left( 1 - f(x_j) \right)^{\beta_i - 1} \frac{\Gamma(\alpha_i + \beta_i)}{\Gamma(\alpha_i)\,\Gamma(\beta_i)}, \tag{8}
$$
with $\alpha_{i} = \frac{f(x_{i})}{h} + 1$ and $\beta_{i} = \frac{1 - f(x_{i})}{h} + 1$ [Chen, 1999, Bouezmarni and Rolin, 2003, Zhang and Karunamuni, 2010], where $h$ is a bandwidth parameter of the kernel density estimate that goes to 0 as $n \to \infty$. We note that the computational complexity of this estimator is $\mathcal{O}(n^{2})$. If we use it within a gradient descent training procedure, the density can be estimated on a mini-batch, and the $\mathcal{O}(n^{2})$ complexity is therefore w.r.t. the size of a mini-batch, not the entire dataset.
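A small sketch of the Beta kernel in Equation (8) follows; evaluating it in log-space is our choice for numerical stability and is not necessarily how the released code does it.

```python
import numpy as np
from scipy.special import gammaln

def beta_kernel(f_xj, f_xi, h):
    """Beta kernel k_B(f(x_j), f(x_i)) of Eq. (8), with alpha_i = f(x_i)/h + 1 and
    beta_i = (1 - f(x_i))/h + 1, evaluated in log-space to avoid Gamma overflow."""
    a = f_xi / h + 1.0
    b = (1.0 - f_xi) / h + 1.0
    log_k = ((a - 1.0) * np.log(f_xj) + (b - 1.0) * np.log(1.0 - f_xj)
             + gammaln(a + b) - gammaln(a) - gammaln(b))
    return np.exp(log_k)
```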
The estimator in Equation (7) is a ratio of two second order U-statistics that converge as $n^{-1/2}$ [Ferguson, 2005]. Therefore, the overall convergence will be $n^{-1/2}$ . Empirical convergence rates are calculated in Appendix G and shown to be close to the theoretically expected value.
Multiclass calibration with Dirichlet kernel density estimates There are several definitions of multiclass calibration that vary in terms of how strictly they define the calibration of the probability vector $f(x)$ . The strongest notion of multiclass calibration, and the one that we focus on in this paper, is canonical (also called multiclass or distribution) calibration [Brocker, 2009, Kull and Flach, 2015, Vaicenavicius et al., 2019], which requires that the whole probability vector $f(x)$ is calibrated (Definition 3.1). Its estimator is:
$$
\widehat{\mathrm{CE}_p(f)^p} = \frac{1}{n} \sum_{j=1}^{n} \left\| \frac{\sum_{i \neq j} k_{\mathrm{Dir}}(f(x_j); f(x_i)) \, y_i}{\sum_{i \neq j} k_{\mathrm{Dir}}(f(x_j); f(x_i))} - f(x_j) \right\|_p^p \tag{9}
$$
where $k_{\mathrm{Dir}}$ is a Dirichlet kernel defined as:
$$
k_{\mathrm{Dir}}\left( f(x_j), f(x_i) \right) = \frac{\Gamma\left( \sum_{k=1}^{K} \alpha_{ik} \right)}{\prod_{k=1}^{K} \Gamma(\alpha_{ik})} \prod_{k=1}^{K} f(x_j)_k^{\alpha_{ik} - 1} \tag{10}
$$
with $\alpha_{i} = \frac{f(x_{i})}{h} + 1$ [Ouimet and Tolosana-Delgado, 2022]. As before, the computational complexity is $\mathcal{O}(n^2)$ , irrespective of $p$ .
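The following NumPy sketch combines Equations (9) and (10) into a canonical $ECE^{KDE}$ estimate; the names and the log-space evaluation are our own choices and this is not the released ece-kde implementation.

```python
import numpy as np
from scipy.special import gammaln

def dirichlet_kernel(f_xj, f_xi, h):
    """Dirichlet kernel of Eq. (10) with alpha_i = f(x_i)/h + 1 (log-space evaluation)."""
    alpha = f_xi / h + 1.0
    log_k = gammaln(alpha.sum()) - gammaln(alpha).sum() + ((alpha - 1.0) * np.log(f_xj)).sum()
    return np.exp(log_k)

def ece_kde(f, y_onehot, h, p=1):
    """Estimate of the canonical L_p calibration error (to the power p), Eq. (9).

    f        : (n, K) array of predicted probability vectors (simplex interior)
    y_onehot : (n, K) array of one-hot labels
    h        : bandwidth of the Dirichlet kernel density estimate
    """
    n = f.shape[0]
    W = np.array([[dirichlet_kernel(f[j], f[i], h) for i in range(n)] for j in range(n)])
    np.fill_diagonal(W, 0.0)                                   # sums over i != j
    cond_mean = (W @ y_onehot) / W.sum(axis=1, keepdims=True)  # ratio of the two KDEs
    return np.mean((np.abs(cond_mean - f) ** p).sum(axis=1))
```

The pairwise kernel matrix makes the quadratic cost explicit: per batch of size $n$, the sketch performs $n^2$ kernel evaluations, matching the $\mathcal{O}(n^2)$ complexity discussed above.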
This estimator is differentiable and furthermore, the following proposition holds:
Proposition 3.5. The Dirichlet kernel based CE estimator is consistent when $p_{f(x)}(f(x))$ is Lipschitz continuous:
$$
\operatorname*{plim}_{n \to \infty} \frac{1}{n} \sum_{j=1}^{n} \left\| \frac{\sum_{i \neq j} k_{\mathrm{Dir}}(f(x_j); f(x_i)) \, y_i}{\sum_{i \neq j} k_{\mathrm{Dir}}(f(x_j); f(x_i))} - f(x_j) \right\|_p^p = \mathbb{E}\left[ \left\| \mathbb{E}[y \mid f(x)] - f(x) \right\|_p^p \right].
$$
Proof. Dirichlet kernel estimators are consistent when the density is Lipschitz continuous over the simplex [Ouimet and Tolosana-Delgado, 2022, Theorem 4]; consequently, by Proposition 3.2 the term inside the norm is consistent for any fixed $f(x_{j})$ (note that summing over $i \neq j$ ensures that the ratio of the KDEs does not depend on the outer summation). Moreover, for any convergent sequence, the norm of that sequence also converges to the norm of its limit. Finally, the outer sum is merely the sample mean of consistent summands, which again is consistent.
With this development, we have for the first time a consistent, differentiable, and tractable estimator of $L_{p}$ canonical calibration error with $\mathcal{O}(n^2)$ computational cost and $\mathcal{O}(n^{-1/2})$ convergence rate, with a debiasing scheme that achieves $\mathcal{O}(n^{-2})$ bias for $p \in \{1, 2\}$ .
# 4 Empirical validation of $ECE^{KDE}$
Accurately evaluating the calibration error is a crucial step towards designing trustworthy models that can be used in societally important settings. The most widely used metric for evaluating miscalibration, and the only other estimator that can be straightforwardly extended to measure canonical calibration, is the histogram-based estimator $ECE^{bin}$. However, as discussed in Vaicenavicius et al. [2019], Widmann et al. [2019], Ding et al. [2020], Ashukha et al. [2020], it has numerous flaws: (i) it is sensitive to the binning scheme; (ii) it is severely affected by the curse of dimensionality, as the number of bins grows exponentially with the number of classes; and (iii) it is asymptotically inconsistent in many cases.
To investigate its relationship with our estimator $ECE^{KDE}$ , we first introduce an extension of the top-label binned estimator to the probability simplex in the three class setting. We start by partitioning the probability simplex into equally-sized, triangle-shaped bins and assign the probability scores to the corresponding bin, as shown in Figure 1a. Then, we define the binned estimate of canonical calibration error as follows: $\mathrm{CE}_p(f)^p\approx \mathbb{E}\left[\| H(f(x)) - f(x)\| _p^p\right]\approx \frac{1}{n}\sum_{i = 1}^n\| H(f(x_i)) - f(x_i)\| _p^p$ , where $H(f(x_i))$ is the histogram estimate, shown in Figure 1b. The surface of the corresponding Dirichlet KDE is presented in Figure 1c. See Appendix F for (i) an experiment investigating their relationship for the three types of calibration (top-label, marginal, canonical), and with varying number of points used for the estimation, and (ii) another example of the binned estimator and Dirichlet KDE on CIFAR-10.
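As a rough illustration of a binned canonical estimator, the sketch below quantizes each probability vector coordinate-wise instead of using the triangular simplex partition of Figure 1a; it is a simplified stand-in, not the estimator used in the experiments.

```python
import numpy as np
from collections import defaultdict

def ece_bin_canonical(f, y_onehot, n_bins=4):
    """Binned estimate of canonical L_1 calibration: quantize each probability
    vector coordinate-wise, replace it by its bin's average label vector
    H(f(x_i)), and average ||H(f(x_i)) - f(x_i)||_1 over the sample."""
    groups = defaultdict(list)
    for i, prob in enumerate(f):
        key = tuple(np.minimum((prob * n_bins).astype(int), n_bins - 1))
        groups[key].append(i)
    total = 0.0
    for idx in groups.values():
        H = y_onehot[idx].mean(axis=0)        # histogram estimate of E[y | bin]
        total += np.abs(H - f[idx]).sum()
    return total / len(f)
```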


(a) Splitting the simplex in 16 bins



(b) Histogram



(c) Dirichlet KDE

Figure 1: Extension of the binned estimator $ECE^{bin}$ to the probability simplex, compared with $ECE^{KDE}$. The $ECE^{KDE}$ achieves a better approximation to the finite sample, and accurately models the fact that samples tend to be concentrated near low dimensional faces of the simplex.
Synthetic experiments We consider an extension of $ECE^{bin}$ to an arbitrary number of classes and investigate its performance compared to $ECE^{KDE}$. Since on real data the ground truth calibration error is unknown, we generate synthetic data with known transformations, using the following procedure. First, we sample uniformly from the simplex using the Kraemer algorithm [Smith and Tromble, 2004]. Then, we apply temperature scaling with $t_1 = 0.6$ to simulate realistic scenarios where the probability scores are concentrated along lower dimensional faces of the simplex. We generate ground truth labels according to the sampled probabilities and therefore obtain a perfectly calibrated classifier. Subsequently, the classifier is miscalibrated by additional temperature scaling with $t_2 = 0.6$. Figure 2a depicts the performance of the two estimators as a function of the sample size on generated data for 4 and 8 classes. $ECE^{KDE}$ converges to the ground truth value obtained by integration in both cases, whereas $ECE^{bin}$ provides poor estimates even with 20000 points.
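A sketch of this synthetic setup is given below; we substitute Dirichlet(1, ..., 1) sampling (which is also uniform on the simplex) for the Kraemer algorithm and apply temperature scaling directly in probability space, so the details are assumptions rather than the exact experimental code.

```python
import numpy as np

def temperature_scale(probs, t):
    """Temperature scaling in probability space: p_k^(1/t), renormalized
    (equivalent to softmax(log p / t)); t < 1 sharpens the distribution."""
    scaled = probs ** (1.0 / t)
    return scaled / scaled.sum(axis=1, keepdims=True)

def make_synthetic(n, k, t1=0.6, t2=0.6, seed=0):
    """Synthetic setup described above: uniform samples on the simplex, sharpened
    with t1; labels drawn from those probabilities (hence a calibrated classifier),
    which is then miscalibrated with an extra temperature t2."""
    rng = np.random.default_rng(seed)
    probs = rng.dirichlet(np.ones(k), size=n)            # uniform on the simplex
    calibrated = temperature_scale(probs, t1)            # concentrate near the faces
    labels = np.array([rng.choice(k, p=row) for row in calibrated])
    miscalibrated = temperature_scale(calibrated, t2)
    return miscalibrated, np.eye(k)[labels]
```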
In another experiment with synthetic data we look at the bias of the sharpness term in a binary setting. In Figure 2b we plot the estimated value of the sharpness term for varying number of samples, both using the partially debiased ratio from Equation (7), and the ratio debiased with the scheme introduced in Appendix D. A sigmoidal function is applied to the calibrated data to obtain an uncalibrated sample that is used to compute the partially debiased and the fully debiased ratio of the sharpness term. The ground truth value is obtained by using 100 million samples to compute the ratio with the partially debiased version, as it converges asymptotically to the true value due to its consistency. We use a
bandwidth of 0.5 and average over 10000 repetitions for each number of samples that range from 32 to 16384. We fix the location of the KDE at $f(x_{j}) = 0.17$ .


(a) $ECE^{bin}$ vs. $ECE^{KDE}$



(b) Debiasing
Figure 2: 2a Performance of $ECE^{bin}$ and $ECE^{KDE}$ on synthetic data for varying number of classes, as a function of the sample size. Ground truth represents the true value of the integral. $ECE^{bin}$ is calculated using several common choices for the number of bins (n_bins denotes the number of bins per class); n_bins* and b* are the optimal values according to Doane's formula [Doane, 1976] and LOO MLE, respectively. $ECE^{KDE}$ converges to the true value in all settings, in contrast to $ECE^{bin}$. 2b Sharpness term evaluated for different numbers of samples with the partially debiased ratio from Equation (7), and with the debiasing scheme derived in Appendix D, on synthetic data.
# 5 Calibration regularized training
Empirical setup To showcase our estimator in applications where canonical calibration is crucial, we consider two medical datasets, namely Kather and DermaMNIST. The Kather dataset [Kather et al., 2016] consists of 5000 histological images of human colorectal cancer and has eight different classes of tissue. DermaMNIST [Yang et al., 2021] is a pre-processed version of the HAM10000 dataset [Tschandl et al., 2018], containing 10015 dermatoscopic images of skin lesions, categorized into seven classes. Both datasets have been collected in accordance with the Declaration of Helsinki. Following standard practice in related work, we also trained ResNet [He et al., 2016], ResNet with stochastic depth (SD) [Huang et al., 2016], DenseNet [Huang et al., 2017] and WideResNet [Zagoruyko and Komodakis, 2016] networks on CIFAR-10/100 [Krizhevsky, 2009]. We use 45000 images for training on the CIFAR datasets, 4000 for Kather and 7007 for DermaMNIST. The code is available at https://github.com/tpopordanoska/ece-kde.
Baselines Cross-entropy. The first baseline model is trained using cross-entropy (XE), with the data preprocessing, training procedure and hyperparameters described in the corresponding paper for the architecture.
Trainable calibration strategies. KDE-XE denotes the joint training of XE with our proposed estimator $ECE^{KDE}$, as defined in Equation (9). MMCE [Kumar et al., 2018] is a differentiable measure of calibration with the property that it is minimized at perfect calibration, i.e., MMCE is 0 if and only if $\mathrm{CE}_p = 0$. It is used as a regulariser alongside NLL, with the strength of regularization parameterized by $\lambda$. Focal loss (FL) [Mukhoti et al., 2020] is an alternative to the cross-entropy loss, defined as $\mathcal{L}_f = -(1 - f(y|x))^{\gamma}\log (f(y|x))$, where $\gamma$ is a hyperparameter and $f(y|x)$ is the probability score that a neural network $f$ outputs for a class $y$ on an input $x$. Their best-performing approach is the sample-dependent FL-53, where $\gamma = 5$ for $f(y|x) \in [0,0.2)$, and $\gamma = 3$ otherwise.
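The sketch below illustrates how such trainable objectives look in practice: a KDE-XE-style loss that adds a differentiable $ECE^{KDE}$ penalty to cross-entropy, and the sample-dependent FL-53 focal loss. Here `ece_kde_fn` and the value of `lam` are placeholders, not the released implementations.

```python
import torch
import torch.nn.functional as F

def kde_xe_loss(logits, targets, ece_kde_fn, lam=0.2):
    """KDE-XE-style objective: cross-entropy plus lam * ECE^KDE of Eq. (9), where
    ece_kde_fn is assumed to be a differentiable (torch) version of the estimator
    sketched after Eq. (10); lam = 0.2 is only an illustrative value."""
    probs = logits.softmax(dim=-1)
    y_onehot = F.one_hot(targets, num_classes=probs.shape[-1]).float()
    return F.cross_entropy(logits, targets) + lam * ece_kde_fn(probs, y_onehot)

def focal_loss_53(logits, targets):
    """Sample-dependent focal loss FL-53: gamma = 5 when the probability of the
    true class is below 0.2, and gamma = 3 otherwise."""
    p_true = logits.softmax(dim=-1).gather(1, targets[:, None]).squeeze(1)
    gamma = torch.where(p_true < 0.2,
                        torch.full_like(p_true, 5.0),
                        torch.full_like(p_true, 3.0))
    return (-(1.0 - p_true) ** gamma * torch.log(p_true)).mean()
```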
Post-hoc calibration strategies. Guo et al. [2017] investigated the performance of several post-hoc calibration methods and found temperature scaling to be a strong baseline, which we use as a representative of this group. It works by scaling the logits with a scalar $T > 0$ , typically learned on a validation set by minimizing NLL. Following Kumar et al. [2018] and Mukhoti et al. [2020], we also use temperature scaling as a post-processing step for our method.
Metrics We report $L_{1}$ canonical calibration using our $ECE^{KDE}$ estimator, calculated according to Equation (9). Additional experiments with $L_{1}$ and $L_{2}$ top-label calibration on CIFAR-10/100 can be found in Appendix E.
Hyperparameters A crucial parameter for KDE is the bandwidth $b$, a positive number that defines the smoothness of the density estimate. A poorly chosen bandwidth may lead to undersmoothing (small bandwidth) or oversmoothing (large bandwidth), as shown in Figure 3. A commonly used non-parametric bandwidth selector is maximum likelihood cross validation [Duin, 1976]. For our experiments we choose the bandwidth from a list of possible values by maximizing the leave-one-out likelihood (LOO MLE). The $\lambda$ parameter for weighting the calibration error w.r.t. the loss is typically chosen via cross-validation or using a holdout validation set. We found that for KDE-XE, values of $\lambda \in [0.001, 0.2]$ provide a good trade-off in terms of accuracy and calibration error. The $p$ parameter is selected depending on the desired $L_{p}$ calibration error and the corresponding theoretical guarantees. The rest of the
hyperparameters for training are set as proposed in the corresponding papers for the architectures we benchmark. In particular, for the CIFAR-10/100 datasets we used a batch size of 64 for DenseNet and 128 for the other architectures. For the medical datasets, we used a batch size of 64, due to their smaller size.
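A minimal sketch of LOO MLE bandwidth selection, assuming a kernel callable with signature `kernel(a, b, h)` such as the Beta or Dirichlet kernels sketched above; the candidate grid is illustrative.

```python
import numpy as np

def loo_log_likelihood(f_x, h, kernel):
    """Leave-one-out log-likelihood of the scores under a KDE with bandwidth h."""
    n = len(f_x)
    W = np.array([[kernel(f_x[j], f_x[i], h) for i in range(n)] for j in range(n)])
    np.fill_diagonal(W, 0.0)
    loo_density = W.sum(axis=1) / (n - 1)        # density at f(x_j), leaving j out
    return np.log(loo_density + 1e-12).sum()

def select_bandwidth(f_x, kernel, candidates=(0.001, 0.01, 0.05, 0.1, 0.5)):
    """Pick the bandwidth that maximizes the leave-one-out likelihood (LOO MLE)."""
    return max(candidates, key=lambda h: loo_log_likelihood(f_x, h, kernel))
```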


Figure 3: Effect of the bandwidth $b$ on the shape of the estimate.
# 5.1 Experiments
An important property of our $ECE^{KDE}$ estimator is differentiability, allowing it to be used in a calibration regularized training framework. In this section, we benchmark KDE-XE with several baselines on medical diagnosis applications, where the calibration of the whole probability vector is of particular interest. For completeness, we also include an experiment on CIFAR-10.
Table 2 summarizes the canonical $L_{1}$ $ECE^{KDE}$ and Table 3 the accuracy, measured across multiple architectures. The bandwidth is chosen by LOO MLE. For MMCE and KDE-XE, we train the models with several values for the regularization weight, and report the best performing one. In Table 2 we notice that KDE-XE consistently achieves very competitive ECE values, while also boosting the accuracy, as shown in Table 3. Interestingly, we observe that temperature scaling does not improve canonical calibration error, contrary to its reported improvements on top-label calibration. This observation that temperature scaling is less effective for stronger notions of calibration is consistent with a similar finding in Kull et al. [2019], where the authors show that although the temperature-scaled model has well calibrated top-label confidence scores, the calibration error is much larger for class-wise calibration.
Table 2: Canonical $L_{1}$ $ECE^{KDE}(\downarrow)$ for different loss functions and architectures, both trained from scratch (Pre T) and after temperature scaling on a validation set (Post T). Best results across Pre T methods are marked in bold.
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Model</td><td colspan="2">XE</td><td colspan="2">MMCE</td><td colspan="2">FL-53</td><td colspan="2">KDE-XE (Our)</td></tr><tr><td>Pre T</td><td>Post T</td><td>Pre T</td><td>Post T</td><td>Pre T</td><td>Post T</td><td>Pre T</td><td>Post T</td></tr><tr><td rowspan="4">Kather</td><td>ResNet-110</td><td>0.335</td><td>0.304</td><td>0.343</td><td>0.300</td><td>0.325</td><td>0.248</td><td>0.311</td><td>0.289</td></tr><tr><td>ResNet-110 (SD)</td><td>0.329</td><td>0.334</td><td>0.235</td><td>0.159</td><td>0.209</td><td>0.122</td><td>0.198</td><td>0.147</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.177</td><td>0.259</td><td>0.201</td><td>0.241</td><td>0.270</td><td>0.328</td><td>0.162</td><td>0.212</td></tr><tr><td>DenseNet-40</td><td>0.244</td><td>0.251</td><td>0.159</td><td>0.218</td><td>0.165</td><td>0.207</td><td>0.114</td><td>0.154</td></tr><tr><td rowspan="4">DermaMNIST</td><td>ResNet-110</td><td>0.579</td><td>0.602</td><td>0.575</td><td>0.603</td><td>0.684</td><td>0.618</td><td>0.467</td><td>0.516</td></tr><tr><td>ResNet-110 (SD)</td><td>0.534</td><td>0.571</td><td>0.470</td><td>0.526</td><td>0.567</td><td>0.594</td><td>0.461</td><td>0.538</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.546</td><td>0.599</td><td>0.470</td><td>0.512</td><td>0.623</td><td>0.608</td><td>0.455</td><td>0.599</td></tr><tr><td>DenseNet-40</td><td>0.573</td><td>0.578</td><td>0.514</td><td>0.558</td><td>0.577</td><td>0.557</td><td>0.366</td><td>0.418</td></tr><tr><td rowspan="4">CIFAR-10</td><td>ResNet-110</td><td>0.133</td><td>0.170</td><td>0.171</td><td>0.196</td><td>0.138</td><td>0.171</td><td>0.126</td><td>0.163</td></tr><tr><td>ResNet-110 (SD)</td><td>0.132</td><td>0.172</td><td>0.164</td><td>0.203</td><td>0.156</td><td>0.201</td><td>0.178</td><td>0.223</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.083</td><td>0.098</td><td>0.143</td><td>0.155</td><td>0.147</td><td>0.177</td><td>0.077</td><td>0.091</td></tr><tr><td>DenseNet-40</td><td>0.104</td><td>0.131</td><td>0.133</td><td>0.155</td><td>0.081</td><td>0.081</td><td>0.098</td><td>0.124</td></tr></table>
Table 3: Accuracy (↑) computed for different architectures. Best results are marked in bold.
<table><tr><td>Dataset</td><td>Model</td><td>XE</td><td>MMCE</td><td>FL-53</td><td>KDE-XE (Our)</td></tr><tr><td rowspan="4">Kather</td><td>ResNet-110</td><td>0.840</td><td>0.860</td><td>0.844</td><td>0.860</td></tr><tr><td>ResNet-110 (SD)</td><td>0.870</td><td>0.900</td><td>0.885</td><td>0.914</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.933</td><td>0.899</td><td>0.873</td><td>0.921</td></tr><tr><td>DenseNet-40</td><td>0.913</td><td>0.93</td><td>0.916</td><td>0.941</td></tr><tr><td rowspan="4">DermaMNIST</td><td>ResNet-110</td><td>0.720</td><td>0.721</td><td>0.674</td><td>0.744</td></tr><tr><td>ResNet-110 (SD)</td><td>0.743</td><td>0.753</td><td>0.689</td><td>0.764</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.736</td><td>0.741</td><td>0.715</td><td>0.754</td></tr><tr><td>DenseNet-40</td><td>0.741</td><td>0.758</td><td>0.705</td><td>0.748</td></tr><tr><td rowspan="4">CIFAR-10</td><td>ResNet-110</td><td>0.925</td><td>0.929</td><td>0.922</td><td>0.929</td></tr><tr><td>ResNet-110 (SD)</td><td>0.926</td><td>0.925</td><td>0.92</td><td>0.907</td></tr><tr><td>Wide-ResNet-28-10</td><td>0.954</td><td>0.947</td><td>0.936</td><td>0.954</td></tr><tr><td>DenseNet-40</td><td>0.947</td><td>0.944</td><td>0.948</td><td>0.947</td></tr></table>
Figure 4 shows the performance of several architectures and datasets in terms of accuracy and $L_{1}$ $ECE^{KDE}$ for various choices of the regularization parameter for MMCE and KDE-XE. The $95\%$ confidence intervals for $ECE^{KDE}$ are calculated using 100 and 10 bootstrap samples on the medical datasets and CIFAR-10, respectively. In all settings, KDE-XE Pareto dominates the competitors, for several choices of $\lambda$ . For example, on DermaMNIST trained with DenseNet, KDE-XE with $\lambda = 0.2$ reduces $ECE^{KDE}$ from $66\%$ to $45\%$ .


(a) ResNet-110 (SD) on Kather



(b) DenseNet on DermaMNIST



(c) ResNet-110 on CIFAR-10

Figure 4: Canonical calibration on various datasets and architectures. The numbers next to the points denote the value of the regularization parameter. KDE-XE outperforms the competitors, both in terms of accuracy and calibration error, for several choices of $\lambda$.
Training time measurements In Table 4 we summarize the running time per epoch of the four architectures, with regularization (KDE-XE), and without regularization (XE). We observe only an insignificant impact on the training speed when using KDE-XE, dispelling any concerns w.r.t. the computational overhead.
To summarize, the experiments show that our estimator consistently produces calibration errors competitive with other state-of-the-art approaches, while maintaining accuracy and keeping the computational complexity at $\mathcal{O}(n^2)$. We note that within the proposed calibration-regularized training framework, this complexity is w.r.t. a mini-batch, and the added cost is less than a couple of percent. Furthermore, the $\mathcal{O}(n^2)$ complexity shows up in other related works [Kumar et al., 2018, Zhang et al., 2020], and is intrinsic to the problem of density-based estimation of calibration error. As future work, larger scale benchmarking will be beneficial for exploring the limits of canonical calibration using Dirichlet kernels.
Table 4: Training time [sec] per epoch for XE and KDE-XE for different models on CIFAR-10.
<table><tr><td>Dataset</td><td>Model</td><td>XE</td><td>KDE-XE</td></tr><tr><td rowspan="4">CIFAR-10</td><td>ResNet-110</td><td>51.8</td><td>53.0</td></tr><tr><td>ResNet-110 (SD)</td><td>45.0</td><td>46.0</td></tr><tr><td>Wide-ResNet-28-10</td><td>152.9</td><td>154.9</td></tr><tr><td>DenseNet-40</td><td>103.2</td><td>106.8</td></tr></table>
# 6 Conclusion
In this paper, we proposed a consistent and differentiable estimator of canonical $L_{p}$ calibration error using Dirichlet kernels. It has favorable computational and statistical properties, with a complexity of $\mathcal{O}(n^2)$ , convergence of $\mathcal{O}(n^{-1/2})$ , and a bias that converges as $\mathcal{O}(n^{-1})$ , which can be further reduced to $\mathcal{O}(n^{-2})$ using our debiasing strategy. The $ECE^{KDE}$ can be directly optimized alongside any loss function in the existing batch stochastic gradient descent framework. Furthermore, we propose using it as a measure of the highest form of calibration, which requires the entire probability vector to be calibrated. To the best of our knowledge, this is the only metric that can tractably capture this type of calibration, which is crucial in safety-critical applications where downstream decisions are made based on the predicted probabilities. We showed empirically on a range of neural architectures and datasets that the performance of our estimator in terms of accuracy and calibration error is competitive against the current state-of-the-art, while having superior properties as a consistent estimator of canonical calibration error.
# Acknowledgments
This research received funding from the Research Foundation - Flanders (FWO) through project number S001421N, and the Flemish Government under the "Onderzoeksprogramma Artificiële Intelligentie (AI) Vlaanderen" programme. R.S. was supported in part by the Tübingen AI Center.
# Ethical statement
The paper is concerned with estimation of calibration error, a topic for which existing methods are deployed, albeit not typically for canonical calibration error in a multi-class setting. We therefore consider the ethical risks to be effectively the same as for any probabilistic classifier. Experiments apply the method to medical image classification, for which misinterpretation of benchmark results with respect to their clinical applicability has been highlighted as a risk, see e.g. Varoquaux and Cheplygina [2022].
# References
Arsenii Ashukha, Alexander Lyzhov, Dmitry Molchanov, and Dmitry Vetrov. Pitfalls of in-domain uncertainty estimation and assembling in deep learning. In International Conference on Learning Representations, 2020.
Ondrej Bohdal, Yongxin Yang, and Timothy Hospedales. Meta-calibration: Meta-learning of model calibration using differentiable expected calibration error. arXiv preprint arXiv:2106.09613, 2021.
Taoufik Bouezmarni and Jean-Marie Rolin. Consistency of the beta kernel density function estimator. The Canadian Journal of Statistics / La Revue Canadienne de Statistique, 31(1):89-98, 2003.
Jochen Brocker. Reliability, sufficiency, and the decomposition of proper scores. Quarterly Journal of the Royal Meteorological Society, 135(643):1512-1519, Jul 2009.
Holger Caesar, Varun Kumar Reddy Bankiti, Alex Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, page 11621-11631, 06 2020.
Song Xi Chen. Beta kernel estimators for density functions. Computational Statistics & Data Analysis, 31:131-145, 1999.
M. Degroot and S. Fienberg. The comparison and evaluation of forecasters. The Statistician, 32: 12-22, 1983.
Yukun Ding, Jinglan Liu, Jinjun Xiong, and Yiyu Shi. Revisiting the evaluation of uncertainty estimation and its application to explore model complexity-uncertainty trade-off. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 22-31, 2020.
David P. Doane. Aesthetic frequency classifications. The American Statistician, 30(4):181-183, 1976.
Robert Duin. On the choice of smoothing parameters for parzen estimators of probability density functions. IEEE Transactions on Computers, C-25(11):1175-1179, 1976.
Andre Esteva, Brett Kuprel, Roberto A. Novoa, Justin Ko, Susan M. Swetter, Helen M. Blau, and Sebastian Thrun. Dermatologist-level classification of skin cancer with deep neural networks. Nature, 542:115-, 2017.
Andre Esteva, Alexandre Robicquet, Bharath Ramsundar, Volodymyr Kuleshov, Mark DePristo, Katherine Chou, Claire Cui, Greg Corrado, Sebastian Thrun, and Jeff Dean. A guide to deep learning in healthcare. Nature Medicine, 25, 01 2019.
Thomas S. Ferguson. U-statistics. In Notes for Statistics 200C. UCLA, 2005.
Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. In International conference on machine learning, pages 1321-1330. PMLR, 2017.
Kartik Gupta, Amir Rahimi, Thalaiyasingam Ajanthan, Thomas Mensink, Cristian Sminchisescu, and Richard Hartley. Calibration of neural networks using splines. In International Conference on Learning Representations, 2021.
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.
Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer, 2016.
Gao Huang, Zhuang Liu, Laurens van der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
Jakob Kather, Cleo-Aron Weis, Francesco Bianconi, Susanne Melchers, Lothar Schad, Timo Gaiser, Alexander Marx, and Frank Zöllner. Multi-class texture analysis in colorectal cancer histology. Scientific Reports, 6:27988, 06 2016.
Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, University of Toronto, 2009.
Volodymyr Kuleshov and Percy S Liang. Calibrated structured prediction. In C. Cortes, N. Lawrence, D. Lee, M. Sugiyama, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 28. Curran Associates, Inc., 2015.
Meelis Kull and Peter Flach. Novel decompositions of proper scoring rules for classification: Score adjustment as precursor to calibration. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pages 68-85. Springer, 2015.
Meelis Kull, Telmo Silva Filho, and Peter Flach. Beta calibration: a well-founded and easily implemented improvement on logistic calibration for binary classifiers. In Aarti Singh and Jerry Zhu, editors, Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, volume 54 of Proceedings of Machine Learning Research, pages 623-631. PMLR, 20-22 Apr 2017.
Meelis Kull, Miquel Perello Nieto, Markus Kangsepp, Telmo Silva Filho, Hao Song, and Peter Flach. Beyond temperature scaling: Obtaining well-calibrated multi-class probabilities with dirichlet calibration. Advances in neural information processing systems, 32, 2019.
Ananya Kumar, Percy S Liang, and Tengyu Ma. Verified uncertainty calibration. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 3792-3803. 2019.
Aviral Kumar, Sunita Sarawagi, and Ujjwal Jain. Trainable calibration measures for neural networks from kernel mean embeddings. In ICML, 2018.
Gongbo Liang, Yu Zhang, Xiaogin Wang, and Nathan Jacobs. Improved trainable calibration method for neural networks on medical imaging classification. In British Machine Vision Conference, 2020.
Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017.
Chunwei Ma, Ziyun Huang, Jiayi Xian, Mingchen Gao, and Jinhui Xu. Improving uncertainty calibration of deep neural networks via truth discovery and geometric optimization. In Cassio de Campos and Marloes H. Maathuis, editors, Proceedings of the Thirty-Seventh Conference on Uncertainty in Artificial Intelligence, volume 161 of Proceedings of Machine Learning Research, pages 75-85. PMLR, 27-30 Jul 2021.
Henry B Mann and Abraham Wald. On stochastic limit and order relationships. The Annals of Mathematical Statistics, 14(3):217-226, 1943.
Jishnu Mukhoti, Viveka Kulharia, Amartya Sanyal, Stuart Golodetz, Philip Torr, and Puneet Dokania. Calibrating deep neural networks using focal loss. Advances in Neural Information Processing Systems, 33:15288-15299, 2020.
Rafael Müller, Simon Kornblith, and Geoffrey E Hinton. When does label smoothing help? Advances in neural information processing systems, 32, 2019.
A. Murphy. A new vector partition of the probability score. Journal of Applied Meteorology, 12: 595-600, 1973.
Mahdi Pakdaman Naeini and Gregory F Cooper. Binary classifier calibration using an ensemble of near isotonic regression models. In 2016 IEEE 16th International Conference on Data Mining (ICDM), pages 360-369. IEEE, 2016.
Mahdi Pakdaman Naeini, Gregory F. Cooper, and Milos Hauskrecht. Obtaining well calibrated probabilities using Bayesian binning. In Proceedings of the Twenty-Ninth AAAI Conference on Artificial Intelligence, pages 2901-2907, 2015.
Khanh Nguyen and Brendan O'Connor. Posterior calibration and exploratory analysis for natural language processing models. arXiv preprint arXiv:1508.05154, 2015.
R.C. Ogliore, G.R. Huss, and K. Nagashima. Ratio estimation in SIMS analysis. *Nuclear Instruments and Methods in Physics Research Section B-beam Interactions With Materials and Atoms*, 269, 06-2011.
Frédéric Ouimet and Raimon Tolosana-Delgado. Asymptotic properties of dirichlet kernel density estimators. Journal of Multivariate Analysis, 187:104832, 2022.
Emanuel Parzen. On estimation of a probability density function and mode. The Annals of Mathematical Statistics, 33(3):1065-1076, 1962.
Gabriel Pereyra, George Tucker, Jan Chorowski, Łukasz Kaiser, and Geoffrey Hinton. Regularizing neural networks by penalizing confident output distributions. arXiv preprint arXiv:1701.06548, 2017.
John C. Platt. Probabilistic outputs for support vector machines and comparisons to regularized likelihood methods. In Advances in Large Margin Classifiers, pages 61-74. MIT Press, 1999.
Sidney Resnick. A probability path. Springer, 2019.
Rebecca Roelofs, Nicholas Cain, Jonathon Shlens, and Michael C. Mozer. Mitigating bias in calibration error estimation. In Gustau Camps-Valls, Francisco J. R. Ruiz, and Isabel Valera, editors, Proceedings of The 25th International Conference on Artificial Intelligence and Statistics, volume 151 of Proceedings of Machine Learning Research, pages 4036-4054. PMLR, 28-30 Mar 2022.
Murray Rosenblatt. Remarks on some nonparametric estimates of a density function. The Annals of Mathematical Statistics, 27(3):832 - 837, 1956.
Jun Shao. Mathematical Statistics, page 180. Springer Texts in Statistics, second edition, 2003.
B. W. Silverman. Density Estimation for Statistics and Data Analysis. Chapman & Hall, 1986.
Noah A. Smith and Roy W. Tromble. Sampling Uniformly from the Unit Simplex. 2004.
Pei Sun, Henrik Kretzschmar, Xerxes Dotiwalla, Aurelien Chouard, Vijaysai Patnaik, Paul Tsui, James Guo, Yin Zhou, Yuning Chai, Benjamin Caine, Vijay Vasudevan, Wei Han, Jiquan Ngiam, Hang Zhao, Aleksei Timofeev, Scott M. Ettinger, Maxim Krivokon, Amy Gao, Aditya Joshi, Yu Zhang, Jonathon Shlens, Zhifeng Chen, and Dragomir Anguelov. Scalability in perception for autonomous driving: Waymo open dataset. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2443-2451, 2020.
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2818-2826, 2016.
Myint Tin. Comparison of some ratio estimators. Journal of the American Statistical Association, 60 (309):294-307, 1965.
Philipp Tschandl, Cliff Rosendahl, and Harald Kittler. The HAM10000 dataset, a large collection of multi-source dermatoscopic images of common pigmented skin lesions. Sci. Data, 5:180161, 2018.
Juozas Vaicenavicius, David Widmann, Carl Andersson, Fredrik Lindsten, Jacob Roll, and Thomas Schon. Evaluating model calibration in classification. In The 22nd International Conference on Artificial Intelligence and Statistics, pages 3459-3467. PMLR, 2019.
Gael Varoquaux and Veronika Cheplygina. Machine learning for medical imaging: methodological failures and recommendations for the future. npj Digital Medicine, 5:48, 04 2022.
Jonathan Wenger, Hedvig Kjellström, and Rudolph Triebel. Non-parametric calibration for classification. In International Conference on Artificial Intelligence and Statistics, pages 178-190, 2020.
David Widmann, Fredrik Lindsten, and Dave Zachariah. Calibration tests in multi-class classification: A unifying framework. Advances in Neural Information Processing Systems, 32, 2019.
Jiancheng Yang, Rui Shi, and Bingbing Ni. Medmnist classification decathlon: A lightweight automl benchmark for medical image analysis. 2021 IEEE 18th International Symposium on Biomedical Imaging (ISBI), Apr 2021.
B. Zadrozny and C. Elkan. Transforming classifier scores into accurate multiclass probability estimates. Proceedings of the eighth ACM SIGKDD international conference on Knowledge discovery and data mining, 2002.
Bianca Zadrozny and Charles Elkan. Obtaining calibrated probability estimates from decision trees and naive bayesian classifiers. ICML, 1, 05 2001.
Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. In *British Machine Vision Conference*, 2016.
Jize Zhang, Bhavya Kailkhura, and T. Yong-Jin Han. Mix-n-match: Ensemble and compositional methods for uncertainty calibration in deep learning. In International Conference on Machine Learning, 2020.
Shunpu Zhang and Rohana Karunamuni. Boundary performance of the beta kernel estimators. Journal of Nonparametric Statistics, 22:81-104, 01 2010.
Shengjia Zhao, Michael Kim, Roshni Sahoo, Tengyu Ma, and Stefano Ermon. Calibrating predictions to decisions: A novel approach to multi-class calibration. In M. Ranzato, A. Beygelzimer, Y. Dauphin, P.S. Liang, and J. Wortman Vaughan, editors, Advances in Neural Information Processing Systems, volume 34, pages 22313-22324. Curran Associates, Inc., 2021.
# Checklist
1. For all authors...

   (a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
   (b) Did you describe the limitations of your work? [Yes]
   (c) Did you discuss any potential negative societal impacts of your work? [Yes] Please refer to our ethical statement.
   (d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]

2. If you are including theoretical results...

   (a) Did you state the full set of assumptions of all theoretical results? [Yes]
   (b) Did you include complete proofs of all theoretical results? [Yes]

3. If you ran experiments...

   (a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] It is in the supplementary material.
   (b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
   (c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] See Figure 4.
   (d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] [No] Table 4 includes compute times. Most of our results are provided in big O complexity.

4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...

   (a) If your work uses existing assets, did you cite the creators? [Yes]
   (b) Did you mention the license of the assets? [No] We do not release the data. Data license is available via the citation.
   (c) Did you include any new assets either in the supplemental material or as a URL? [No]
   (d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [Yes] Medical datasets used in this paper conform to the Declaration of Helsinki.
   (e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [Yes] Medical datasets used in this paper conform to the Declaration of Helsinki.

5. If you used crowdsourcing or conducted research with human subjects...

   (a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A] We did not use crowdsourcing or conduct research with human subjects.
   (b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A] We did not use crowdsourcing or conduct research with human subjects.
   (c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A] We did not use crowdsourcing or conduct research with human subjects.
aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68ac195c203ba37f8ee9b20fbb1f8af303de2b819765ba804278f5412848a241
size 409351

aconsistentanddifferentiablelpcanonicalcalibrationerrorestimator/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eb75b0ebc26e0a0d76c474c4000a165c228c51d8d0002722e88391fc5b1b4cd2
size 497455

aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90e8c5e2015f2d21c771490f2b86c0791a4a47aaf446258251d45cb59a5487ea
size 89859

aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ddedde8929c7daba6245d3d21a9a62b5d66adba8e7ed27e9df18764e6acbbefd
size 111960

aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/0662105f-d79c-4957-bbb9-48afa250a3f7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e69fecc3b087c81c77e0486eff19e2f440fe037372954594cb29c48d2626cc7
size 385286

aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/full.md
ADDED
@@ -0,0 +1,480 @@
# A Consolidated Cross-Validation Algorithm for Support Vector Machines via Data Reduction
Boxiang Wang
Department of Statistics and Actuarial Science
University of Iowa
Iowa City, IA 52242, USA
boxiang-wang@uiowa.edu

Archer Y. Yang
Department of Mathematics and Statistics
McGill University
Montreal, QC H3A 0B9, Canada
archer.yang@mcgill.ca
# Abstract
We propose a consolidated cross-validation (CV) algorithm for training and tuning the support vector machine (SVM) on reproducing kernel Hilbert spaces. Our consolidated CV algorithm utilizes a recently proposed exact leave-one-out formula for the SVM and accelerates the SVM computation via a data reduction strategy. In addition, to compute the SVM with the bias term (intercept), which is not handled by the existing data reduction methods, we propose a novel two-stage consolidated CV algorithm. With numerical studies, we demonstrate that our algorithm is about an order of magnitude faster than the two mainstream SVM solvers, kernlab and LIBSVM, with almost the same accuracy.
# 1 Introduction
This paper concerns one of the most successful classifiers, the kernel support vector machine (SVM) (Cortes and Vapnik, 1995; Vapnik, 1995, 1998), which has been popularly used on structured data in the past two decades. The success of the SVM is mainly attributed to its appealing geometric interpretation, solid theoretical foundation, and high predictive power. To assess the predictive accuracy of the SVM, cross-validation (CV) (Wahba and Wold, 1975; Arlot and Celisse, 2010) is perhaps the most commonly used method in practice. In a $K$-fold CV procedure, the training data is randomly split into $K$ equal-sized groups. Based on data splitting, part of the data is used for training each competing model and the rest of the data is reserved for evaluating the prediction error. The model with the smallest CV error is finally selected. Typical choices of $K$ are 5, 10, or $n$ (the sample size), where $K = n$ yields the so-called leave-one-out cross-validation (LOOCV).
LOOCV is generally less used than ten-fold and five-fold CV, largely because of the two popular arguments: (1) high computational cost of LOOCV; (2) much larger variance than five-fold or ten-fold CV. We must point out that while the first argument is true in some sense, the second argument is not generally true about LOOCV. For instance, Kohavi (1995) and Hastie et al. (2009) argue that leave-one-out is almost unbiased, but it has high variance, leading to unreliable estimates. A series of revealing works, e.g., Burman (1989); Bengio and Grandvalet (2004); Molinaro et al. (2005); Zhang and Yang (2015), have shown that, both empirically and theoretically, for modeling procedures with low instability, LOOCV often has the smallest variability. For example, in the context of the kernel SVM, Wang and Zou (2021) provided convincing numerical examples to show that (1) LOOCV has almost no bias in estimating the generalization error; (2) LOOCV does not necessarily have higher variance than ten-fold and five-fold CV. Consequently LOOCV results in a smaller overall error when estimating the prediction error as compared with ten-fold and five-fold CV.
From the aforementioned arguments, we can see the only legitimate complaint about LOOCV would arise from its expensive computation, as a typical approach needs to fit the models $n$ times on the leave-one-out data before evaluating their performance with each of the samples removed, so the
computational cost is roughly $n$ times as large as the cost of a single fit on the full data. To mitigate the computational burden, Golub et al. (1979) proposed a shortcut formula of LOOCV for smoothing splines such that the whole computation time is of the same order of fitting a single model, and the shortcut formula later evolved into the generalized cross-validation (GCV) for ridge regression.
Nevertheless, for the kernel classifiers, how to efficiently compute the exact LOOCV is a long-standing open problem. The shortcut cross-validation formula has long been considered a unique property of some linear smoothers, and many works such as the generalized approximate cross-validation (GACV) (Wahba et al., 1999) resorted to approximating LOOCV, while there is no theoretical guarantee that LOOCV can always be well approximated. To solve the exact (rather than approximated) LOOCV, only very recently did Wang and Zou (2022) successfully propose a leave-one-out lemma extending the Golub-Heath-Wahba formula to the kernel classifiers. Specifically, they showed the exact LOOCV error can be obtained by slightly varying the class labels without literally leaving out any samples during the CV procedure. Since no sample is left out, all the folds of LOOCV use the same complete data, and thus redundant computational effort can be saved to dramatically accelerate LOOCV. Based on the leave-one-out lemma, Wang and Zou (2022) unified the training and tuning of the SVM and developed a new magicsvm algorithm, which often runs an order of magnitude faster than state-of-the-art SVM solvers, e.g., kernlab (Karatzoglou et al., 2004) and LIBSVM (Chang and Lin, 2011).
In this work, the main contribution is to propose a consolidated CV algorithm via data reduction. The data reduction method was first proposed by Ghaoui et al. (2010) for the lasso method (Tibshirani, 1996) and then extended to the SVM (Ogawa et al., 2013; Wang et al., 2014; Pan and Xu, 2018; Hong et al., 2019). The key innovation of our proposal is to reduce all the cross-validated data in a consolidated manner, thereby speeding up the whole SVM procedure. Our method is fundamentally different from the existing methods, which isolate the model training and tuning. Moreover, the existing data reduction methods cannot handle the SVM with the bias (intercept), which is essential for achieving high prediction accuracy. To handle the SVM with the bias, we propose a novel two-stage consolidated CV; such an extension is highly non-trivial.
We implement the consolidated CV in a ccvsvm algorithm. Simulations and nine benchmark data sets are used to demonstrate the superior performance of ccvsvm. To give a quick demonstration, our consolidated CV algorithm reduces the run time from more than 1.5 hours (by LIBSVM) to less than one minute when performing the exact LOOCV for the kernel SVM on the arrhythmia data set.
The remainder of this paper is organized as follows. In Section 2, we discuss the exact leave-one-out lemma and then propose a consolidated CV algorithm via data reduction. Section 3 extends the consolidated CV to handle the general SVM problems with the bias. In Section 4, we demonstrate the computational advantages of fitting the kernel SVM using our proposed methods over the other competitors with simulations and real data applications. The paper is concluded in Section 5 with extensions through kernel approximations and discussions on future directions.
# 2 Methodology
# 2.1 SVM and the Exact Leave-One-Out Lemma
Since we need to work with the fundamentals of the SVM, we first review the SVM in this section.
We focus on binary classification. Let $L(u) = (1 - u)_{+} = \max(1 - u, 0)$ be the hinge loss. Suppose there are $n$ training samples, $(\mathbf{x}_i, y_i), i = 1, 2, \dots, n$, where each $\mathbf{x}_i \in \mathbb{R}^p$ and $y_i = \pm 1$. The SVM can be formulated as a function estimation problem in a reproducing kernel Hilbert space (Wahba, 1990):
$$
\hat{f}_l = \underset{f \in \mathcal{H}_K}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left( 1 - y_i f(\mathbf{x}_i) \right)_+ + \lambda_l \| f \|_{\mathcal{H}_K}^2 \right], \tag{1}
$$
where $\lambda_l > 0$ is a tuning parameter chosen from a decreasing sequence $\lambda_1 > \lambda_2 > \dots > \lambda_L$ , $\mathcal{H}_K$ , the RKHS, is generated by a bivariate kernel function $K: \mathcal{X} \times \mathcal{X} \to \mathbb{R}$ , and the classifier $\hat{f}$ is thus dubbed kernel SVM. Throughout this paper, we consider the universal kernel, whose induced RKHS $\mathcal{H}_K$ is rich enough to yield arbitrarily accurate decision boundaries (Steinwart, 2001; Micchelli et al., 2006). A commonly used universal kernel is the radial kernel $K(\mathbf{x}_i, \mathbf{x}_j) = \exp(-\sigma\|\mathbf{x}_i - \mathbf{x}_j\|_2^2)$ .
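For reference, a small NumPy sketch of the radial kernel matrix mentioned above; this is a generic implementation, not code from ccvsvm.

```python
import numpy as np

def radial_kernel_matrix(X, sigma):
    """Radial kernel matrix K_ij = exp(-sigma * ||x_i - x_j||_2^2) for rows of X."""
    sq_norms = (X ** 2).sum(axis=1)
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2.0 * X @ X.T
    return np.exp(-sigma * np.maximum(sq_dists, 0.0))
```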
|
| 58 |
+
|
| 59 |
+
By the representer theorem (Wahba, 1990), problem (1) has a finite-dimensional solution:
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\hat{\boldsymbol{\alpha}}_l = \underset{\boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left(1 - y_i \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right)_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha} \right], \tag{2}
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
where $\mathbf{K}$ is the $n\times n$ kernel matrix with $K_{ij} = K(\mathbf{x}_i,\mathbf{x}_j)$, assumed to be positive definite, and $\mathbf{K}_i$ is its $i$th row. Thus problem (2) has a unique minimizer, and the fitted SVM classifier is $\hat{f}(\mathbf{x}) = \sum_{i = 1}^{n}\hat{\alpha}_{i}K(\mathbf{x}_{i},\mathbf{x})$.
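As a reminder of how predictions are formed from the fitted coefficients, here is a short illustrative sketch (hypothetical helper name, radial kernel assumed):

```python
import numpy as np

def decision_function(alpha_hat, X_train, X_new, sigma=1.0):
    """Evaluate f_hat(x) = sum_i alpha_hat_i * K(x_i, x) with the radial kernel."""
    sq = ((X_new[:, None, :] - X_train[None, :, :]) ** 2).sum(axis=-1)
    return np.exp(-sigma * sq) @ alpha_hat   # classify a new point by the sign of f_hat
```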
|
| 66 |
+
|
| 67 |
+
To tune the model with the LOOCV procedure, the SVM is fitted on the training data with the $j$th sample left out: for each $l = 1,2,\ldots ,L$ and each $j = 1,2,\ldots ,n$, let $\tilde{\boldsymbol{\alpha}}_l^{[-j]}$ be
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
\tilde{\boldsymbol{\alpha}}_l^{[-j]} = \underset{\boldsymbol{\alpha} \in \mathbb{R}^{n-1}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i \neq j} \left(1 - y_i \left(\mathbf{K}_i^{[-j]}\right)^{\prime} \boldsymbol{\alpha}\right)_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K}^{[-j]} \boldsymbol{\alpha} \right], \tag{3}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
where $\mathbf{K}^{[-j]}$ is the leave-one-out kernel matrix induced by the training data without the $j$ th sample. Problem (2) refers to the complete data problem, and problem (3) refers to the LOOCV problem.
|
| 74 |
+
|
| 75 |
+
The bottleneck of the LOOCV problem is mainly due to the computation involving $n$ different leave-one-out kernel matrices. To reduce the computational burden, this work is based on the exact leave-one-out lemma (Wang and Zou, 2022) for the kernel SVM, and the key idea is to obtain the exact LOOCV from the complete kernel matrix.
|
| 76 |
+
|
| 77 |
+
Lemma 2.1. (Exact leave-one-out lemma) For a given $j$ , let $\tilde{y}_i^{[j]} = y_i$ if $i \neq j$ and $\tilde{y}_j^{[j]} = 0$ . Define
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\hat{\boldsymbol{\alpha}}_l^{[-j]} = \underset{\boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left(1 - \tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right)_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha} \right]. \tag{4}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
Then the solution of problem (3) can be obtained as
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\tilde{\boldsymbol{\alpha}}_l^{[-j]} = \left(\hat{\alpha}_{1,l}^{[-j]}, \dots, \hat{\alpha}_{j-1,l}^{[-j]}, \hat{\alpha}_{j+1,l}^{[-j]}, \dots, \hat{\alpha}_{n,l}^{[-j]}\right)^{\prime}.
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
Although problem (3) can be transformed into problem (4), the solutions of the two problems have different lengths. Lemma 2.1 indicates that $\hat{\boldsymbol{\alpha}}_{l}^{[-j]} = (\tilde{\alpha}_{1,l}^{[-j]},\dots,\tilde{\alpha}_{j - 1,l}^{[-j]},0,\tilde{\alpha}_{j,l}^{[-j]},\dots,\tilde{\alpha}_{n - 1,l}^{[-j]})'$, i.e., $\hat{\alpha}_{j,l}^{[-j]}$, the $j$th element of the solution $\hat{\boldsymbol{\alpha}}_{l}^{[-j]}$, is zero, and the solution of problem (3) can be retrieved by removing the $j$th element from $\hat{\boldsymbol{\alpha}}_{l}^{[-j]}$.
|
| 90 |
+
|
| 91 |
+
As a consequence of transforming problem (3) into problem (4), the same kernel matrix $\mathbf{K}$ is used in all the folds during LOOCV, rather than the leave-one-out matrices $\mathbf{K}^{[-j]}$ , while slightly different responses are crafted for different $j$ . By sharing the same kernel matrix, some redundant calculations can be saved and Wang and Zou (2022) developed the efficient algorithm magicsvm.
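Lemma 2.1 can also be checked numerically. Below is a minimal sketch using numpy and cvxpy for illustration; it is not the magicsvm implementation, and the helper name `svm_alpha`, the data, and the parameter values are our own choices. It solves problem (4) with the $j$th label zeroed out and compares the result with the literal leave-one-out problem (3).

```python
import numpy as np
import cvxpy as cp

rng = np.random.default_rng(1)
n, lam, j = 30, 0.1, 5
X = rng.normal(size=(n, 4))
y = np.where(X[:, 0] + 0.3 * rng.normal(size=n) > 0, 1.0, -1.0)
sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)
K = np.exp(-0.5 * sq) + 1e-8 * np.eye(n)      # radial kernel matrix, jittered to stay positive definite

def svm_alpha(Kmat, labels, lam, n_total):
    """Minimize (1/n_total) * sum_i (1 - labels_i * K_i' a)_+ + lam * a' K a."""
    L = np.linalg.cholesky(Kmat)               # write a'Ka as ||L'a||^2 for a convex program
    a = cp.Variable(Kmat.shape[0])
    obj = cp.sum(cp.pos(1 - cp.multiply(labels, Kmat @ a))) / n_total \
        + lam * cp.sum_squares(L.T @ a)
    cp.Problem(cp.Minimize(obj)).solve()
    return a.value

y_tilde = y.copy()
y_tilde[j] = 0.0                               # problem (4): zero out the j-th label
alpha_full = svm_alpha(K, y_tilde, lam, n)

keep = np.arange(n) != j                       # problem (3): literally drop the j-th sample
alpha_loo = svm_alpha(K[np.ix_(keep, keep)], y[keep], lam, n)

print(abs(alpha_full[j]))                               # close to zero
print(np.max(np.abs(alpha_full[keep] - alpha_loo)))     # close to zero
```

Up to solver tolerance, the $j$th coefficient of the full-length solution vanishes and the remaining coefficients match the leave-one-out solution, as the lemma states.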
|
| 92 |
+
|
| 93 |
+
# 2.2 Consolidated CV via Data Reduction
|
| 94 |
+
|
| 95 |
+
On the basis of Lemma 2.1, we propose a data reduction strategy to accelerate the LOOCV computation of the kernel SVM, which we refer to as consolidated CV.
|
| 96 |
+
|
| 97 |
+
For notational convenience, the complete data problem (2) can be written as a special case of problem (4) with $j = 0$ , i.e., $\hat{\alpha}_l \equiv \hat{\alpha}_l^{[-0]}$ and
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\hat{\boldsymbol{\alpha}}_l^{[-0]} = \underset{\boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left(1 - \tilde{y}_i^{[0]} \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right)_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha} \right],
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
where we define $\tilde{y}_i^{[0]} = y_i$ for each $i = 1,2,\dots,n$ . By solving problem (4) for all $j = 0,1,\ldots ,n$ , we both train the SVM through the complete data problem (2) and tune it using LOOCV.
|
| 104 |
+
|
| 105 |
+
The idea of consolidated CV is motivated by the sparsity of the solution $\hat{\alpha}_l^{[-j]}$ in problem (4). To see this, we check the optimality condition of problem (4) by taking the sub-differential of the objective with respect to each $\mathbf{K}_i^{\prime}\alpha$ , for each $j = 0,1,\ldots ,n$ :
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
0 \in \frac{1}{n} \tilde{y}_i^{[j]} \partial L\left(\tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]}\right) + 2 \lambda_l \hat{\alpha}_{i,l}^{[-j]}, \quad \forall i = 1, \ldots, n,
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
where $\partial L(t)$ is the subgradient of the hinge loss function: $\partial L(t) = -1$ , if $t < 1$ ; $\partial L(t) = 0$ , if $t > 1$ ; and $\partial L(t) \in [-1,0]$ if $t = 1$ . It follows that
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\hat{\alpha}_{i,l}^{[-j]} = \left\{ \begin{array}{ll} \frac{\tilde{y}_i^{[j]}}{2 n \lambda_l}, & \text{if } \tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} < 1, \\ 0, & \text{if } \tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} > 1. \end{array} \right.
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
By translating $\tilde{y}_i^{[j]}$ back to $y_{i}$ , we see
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
\hat{\alpha}_{i,l}^{[-j]} = \left\{ \begin{array}{ll} \frac{y_i}{2 n \lambda_l}, & \text{if } y_i \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} < 1 \text{ and } i \neq j, \\ 0, & \text{if } y_i \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} > 1 \text{ or } i = j. \end{array} \right. \tag{5}
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+
Expression (5) hints at a possible data reduction strategy: before invoking the actual calculation of $\hat{\boldsymbol{\alpha}}_l^{[-j]}$, if we know that $y_i\mathbf{K}_i'\hat{\boldsymbol{\alpha}}_l^{[-j]} > 1$ for some $i$, then we can directly set $\hat{\alpha}_{i,l}^{[-j]}$ to zero; likewise, if $y_{i}\mathbf{K}_{i}^{\prime}\hat{\boldsymbol{\alpha}}_{l}^{[-j]} < 1$ is known, then $\hat{\alpha}_{i,l}^{[-j]}$ must equal $y_{i} / (2n\lambda_{l})$ unless $i = j$. We can thus predetermine the values of some coordinates and only need to compute the remaining ones, so the dimension of problem (4) can be reduced.
|
| 124 |
+
|
| 125 |
+
The key to performing the data reduction through expression (5) is to know whether $y_{i}\mathbf{K}_{i}^{\prime}\hat{\alpha}_{l}^{[-j]} < 1$ or $> 1$ for some $i$ before $\hat{\alpha}_l^{[-j]}$ is actually computed. We present the following theorem.
|
| 126 |
+
|
| 127 |
+
Theorem 2.2. For some $l > 1$ , suppose we have solved
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\hat{\boldsymbol{\alpha}}_{l-1} = \underset{\boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left(1 - y_i \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right)_{+} + \lambda_{l-1} \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha} \right].
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
For each $i = 1,2,\ldots ,n$ define
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
a_{i,l}^{+} = \frac{\lambda_{l-1} + \lambda_{l}}{2 \lambda_{l}} y_{i} \mathbf{K}_{i}^{\prime} \hat{\boldsymbol{\alpha}}_{l-1} + \frac{\lambda_{l-1} - \lambda_{l}}{2 \lambda_{l}} \sqrt{B} \sqrt{\hat{\boldsymbol{\alpha}}_{l-1}^{\prime} \mathbf{K} \hat{\boldsymbol{\alpha}}_{l-1}} + \frac{B}{2 n \lambda_{l}},
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
a_{i,l}^{-} = \frac{\lambda_{l-1} + \lambda_{l}}{2 \lambda_{l}} y_{i} \mathbf{K}_{i}^{\prime} \hat{\boldsymbol{\alpha}}_{l-1} - \frac{\lambda_{l-1} - \lambda_{l}}{2 \lambda_{l}} \sqrt{B} \sqrt{\hat{\boldsymbol{\alpha}}_{l-1}^{\prime} \mathbf{K} \hat{\boldsymbol{\alpha}}_{l-1}} - \frac{B}{2 n \lambda_{l}},
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $B = \max_{i}K(\mathbf{x}_{i},\mathbf{x}_{i})$ . Then for each $j = 0,1,\ldots ,n$ , it holds
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
a_{i,l}^{-} \leq y_i \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} \leq a_{i,l}^{+}, \quad \forall i \neq j. \tag{6}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
Further, let $\mathcal{L} = \{i:a_{i,l}^{+} < 1\}$ and $\mathcal{R} = \{i:a_{i,l}^{-} > 1\}$ . Then the solution of problem (4) satisfies that
|
| 150 |
+
|
| 151 |
+
$$
|
| 152 |
+
\hat{\alpha}_{i,l}^{[-j]} = \left\{ \begin{array}{ll} \frac{\tilde{y}_i^{[j]}}{2 n \lambda_l}, & \text{if } i \in \mathcal{L}; \\ 0, & \text{if } i \in \mathcal{R}. \end{array} \right.
|
| 153 |
+
$$
|
| 154 |
+
|
| 155 |
+
In Theorem 2.2, for radial and Laplacian kernels, we can directly set $B = 1$ ; for some unbounded kernels such as polynomial kernels, we calculate $B = \max_{i\in \{1,2,\dots,n\}}K(\mathbf{x}_i,\mathbf{x}_i)$ based on training data.
|
| 156 |
+
|
| 157 |
+
Note that Theorem 2.2 holds for $\hat{\boldsymbol{\alpha}}_l^{[-j]}$, $\forall j = 0,1,\ldots ,n$. By utilizing $\hat{\boldsymbol{\alpha}}_{l - 1}$, the solution of the complete data problem with the tuning parameter $\lambda_{l - 1}$, we can predetermine certain coordinates for both the complete data problem and all LOOCV problems with $\lambda_l$, i.e., $\hat{\boldsymbol{\alpha}}_l^{[-j]}$ for all $j = 0,1,\dots ,n$, through $\mathcal{L}$ and $\mathcal{R}$, thus performing data reduction in a consolidated fashion.
|
| 158 |
+
|
| 159 |
+
To solve problem (4), Theorem 2.2 implies that $\hat{\alpha}_{i,l}^{[-j]}$ for $i\in \mathcal{L}$ and $i\in \mathcal{R}$ can be pre-determined, so we only need to solve $\hat{\alpha}_{i,l}^{[-j]}$ , for $i\in S$ where $S\equiv (\mathcal{L}\cup \mathcal{R})^C$ . Denote by $\tau$ a one-to-one mapping from $\{1,2,\dots ,n_s\}$ to $S$ , where $n_{s}$ is the cardinality of $S$ . Let $\Gamma$ be the $n\times n_{s}$ sub-matrix of $\mathbf{K}$ such that its $i$ th column $\Gamma_{i} = \mathbf{K}_{\tau (i)}$ . Let $\boldsymbol{\Sigma}$ be the $n_{s}\times n_{s}$ matrix such that $\Sigma_{ij} = K_{\tau (i)\tau (j)}$ .
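As a concrete illustration of this construction, here is a hedged numpy sketch (the helper name `consolidated_screen` is ours, not part of ccvsvm) that, given the previous solution $\hat{\boldsymbol{\alpha}}_{l-1}$, computes the bounds of Theorem 2.2, the index sets $\mathcal{L}$, $\mathcal{R}$, $S$, and the reduced matrices $\Gamma$ and $\Sigma$.

```python
import numpy as np

def consolidated_screen(K, y, alpha_prev, lam_prev, lam, B=1.0):
    """Screening step of Theorem 2.2: bounds a+/a-, sets L/R/S, reduced matrices."""
    n = len(y)
    margin_prev = y * (K @ alpha_prev)                       # y_i * K_i' alpha_{l-1}
    radius = np.sqrt(B) * np.sqrt(alpha_prev @ K @ alpha_prev)
    center = (lam_prev + lam) / (2.0 * lam) * margin_prev
    spread = (lam_prev - lam) / (2.0 * lam) * radius
    a_plus = center + spread + B / (2.0 * n * lam)
    a_minus = center - spread - B / (2.0 * n * lam)
    L = np.where(a_plus < 1)[0]                              # coefficients fixed to y_i / (2 n lam)
    R = np.where(a_minus > 1)[0]                             # coefficients fixed to 0
    S = np.setdiff1d(np.arange(n), np.union1d(L, R))         # coordinates still to be solved
    Gamma = K[:, S]                                          # n x n_s matrix Gamma
    Sigma = K[np.ix_(S, S)]                                  # n_s x n_s matrix Sigma
    return L, R, S, Gamma, Sigma
```

The same sets and matrices are then shared by the complete data problem and by every LOOCV fold at the current $\lambda_l$.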
|
| 160 |
+
|
| 161 |
+
Algorithm 1 Consolidated cross-validation
|
| 162 |
+
Input: $\lambda_{1} > \lambda_{2} > \ldots >\lambda_{L},\mathbf{K},\mathbf{y}$
|
| 163 |
+
1:Obtain $\hat{\alpha}_{1} = \underset {\alpha \in \mathbb{R}^{n}}{\mathrm{argmin}}\frac{1}{n}\sum_{i = 1}^{n}(1 - y_{i}\mathbf{K}_{i}^{\prime}\pmb {\alpha})_{+} + \lambda_{1}\pmb{\alpha}^{\prime}\mathbf{K}\pmb{\alpha}.$
|
| 164 |
+
2:for $l = 2,3,\dots ,L$ do
|
| 165 |
+
3:Construct the sets $\mathcal{L}$ and $\mathcal{R}$ according to Theorem 2.2. Let $S = (\mathcal{L}\cup \mathcal{R})^C$
|
| 166 |
+
4:Construct the matrices $\Gamma$ and $\boldsymbol{\Sigma}$
|
| 167 |
+
5:for $j = 0,1,\dots ,n$ do
|
| 168 |
+
6:if $j > 0$ and $\hat{\alpha}_{j,l} = 0$ then
|
| 169 |
+
7:Obtain $\hat{\alpha}_l^{[-j]} = \hat{\alpha}_l$
|
| 170 |
+
8:else
|
| 171 |
+
9:Construct the vector $\bar{\mathbf{y}}^{[j]}$
|
| 172 |
+
10:Obtain $\hat{\eta}_l^{[-j]}$ by solving problem (8). (If $j > 0$ , initialize the algorithm by $\hat{\eta}_l$ .)
|
| 173 |
+
11:Obtain $\hat{\alpha}_l^{[-j]}$ from expression (7).
|
| 174 |
+
12:end if
|
| 175 |
+
13:end for
|
| 176 |
+
14:end for
|
| 177 |
+
Output: $\hat{\alpha}_l,\hat{\alpha}_l^{[-j]}$ , for each $j = 1,2,\dots ,n$ and $l = 1,2,\dots ,L$
|
| 178 |
+
|
| 179 |
+
For each $j = 0,1,\dots ,n$, let $\bar{\mathbf{y}}^{[j]}$ be the $n$-vector whose $i$th element is $\tilde{y}_i^{[j]}$ if $i\in \mathcal{L}$, and 0 otherwise. The solution of problem (4) is obtained as
|
| 180 |
+
|
| 181 |
+
$$
|
| 182 |
+
\hat{\alpha}_{i,l}^{[-j]} = \left\{ \begin{array}{ll} \frac{\tilde{y}_i^{[j]}}{2 n \lambda_l}, & \text{if } i \in \mathcal{L}, \\ 0, & \text{if } i \in \mathcal{R}, \\ \hat{\eta}_{\tau^{-1}(i),l}^{[-j]}, & \text{if } i \in S, \end{array} \right. \tag{7}
|
| 183 |
+
$$
|
| 184 |
+
|
| 185 |
+
where $\hat{\eta}_{\tau^{-1}(i),l}^{[-j]}$ is the $\tau^{-1}(i)$ th element of
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\hat{\boldsymbol{\eta}}_l^{[-j]} = \underset{\boldsymbol{\eta} \in \mathbb{R}^{n_s}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} \left(1 - \tilde{y}_i^{[j]} \boldsymbol{\Gamma}_i^{\prime} \boldsymbol{\eta} - \frac{1}{2 n \lambda_l} \tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \bar{\mathbf{y}}^{[j]}\right)_{+} + \frac{1}{n} \bar{\mathbf{y}}^{[j]\prime} \boldsymbol{\Gamma} \boldsymbol{\eta} + \lambda_l \boldsymbol{\eta}^{\prime} \boldsymbol{\Sigma} \boldsymbol{\eta} \right]. \tag{8}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
The dimension of problem (8) is $n_s$, which is lower than the dimension $n$ of the original problem (4). The matrices $\Gamma$ and $\Sigma$ are the same for each $j = 0,1,\dots,n$. We introduce an optimization algorithm for solving problem (8) in the next section.
|
| 192 |
+
|
| 193 |
+
In addition, by utilizing the fact that an SVM solution is unchanged if non-support-vector samples are left out, namely, $\hat{\alpha}_{j,l} = 0$ for some $j$ implies $\hat{\boldsymbol{\alpha}}_l^{[-j]} = \hat{\boldsymbol{\alpha}}_l$, we can directly obtain the $j$th LOOCV solution from the complete data problem without solving problem (8). We summarize the consolidated CV algorithm in Algorithm 1.
|
| 194 |
+
|
| 195 |
+
# 2.3 A Consolidated Algorithm for Solving Problem (8)
|
| 196 |
+
|
| 197 |
+
Owing to Theorem 2.2, we can perform LOOCV by solving problem (8), a reduced-dimensional optimization problem, for each $j$. To overcome the computational challenge caused by the non-smoothness of the hinge loss, we consider a smoothed loss,
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
L_{\tau}(u) = \left\{ \begin{array}{ll} 0, & u \geq 1 + \tau, \\ \left(u - (1 + \tau)\right)^{2} / (4 \tau), & 1 - \tau < u < 1 + \tau, \\ 1 - u, & u \leq 1 - \tau, \end{array} \right.
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
for some small $\tau > 0$. One can show that $L_{\tau}$ has a Lipschitz continuous gradient, $|L_{\tau}'(t_1) - L_{\tau}'(t_2)| \leq \frac{1}{2\tau} |t_1 - t_2|$, $\forall t_1, t_2 \in \mathbb{R}$. Thus a smoothed surrogate of problem (8) is
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
\hat{\boldsymbol{\eta}}_{\tau,l}^{[-j]} = \underset{\boldsymbol{\eta} \in \mathbb{R}^{n_s}}{\operatorname{argmin}} \left[ \frac{1}{n} \sum_{i=1}^{n} L_{\tau}\left(\tilde{y}_i^{[j]} \boldsymbol{\Gamma}_i^{\prime} \boldsymbol{\eta} + \frac{1}{2 n \lambda_l} \tilde{y}_i^{[j]} \mathbf{K}_i^{\prime} \bar{\mathbf{y}}^{[j]}\right) + \frac{1}{n} \bar{\mathbf{y}}^{[j]\prime} \boldsymbol{\Gamma} \boldsymbol{\eta} + \lambda_l \boldsymbol{\eta}^{\prime} \boldsymbol{\Sigma} \boldsymbol{\eta} \right]. \tag{9}
|
| 207 |
+
$$
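For reference, a small numpy transcription of the smoothed loss $L_{\tau}$ and its derivative is given below (our own sketch, with hypothetical helper names; it is not taken from the paper's code).

```python
import numpy as np

def smoothed_hinge(u, tau):
    """L_tau(u): zero above 1 + tau, quadratic in between, linear (1 - u) below 1 - tau."""
    u = np.asarray(u, dtype=float)
    return np.where(u >= 1 + tau, 0.0,
           np.where(u <= 1 - tau, 1.0 - u, (u - (1 + tau)) ** 2 / (4.0 * tau)))

def smoothed_hinge_grad(u, tau):
    """Derivative of L_tau; Lipschitz continuous with constant 1 / (2 * tau)."""
    u = np.asarray(u, dtype=float)
    return np.where(u >= 1 + tau, 0.0,
           np.where(u <= 1 - tau, -1.0, (u - (1 + tau)) / (2.0 * tau)))

u = np.linspace(-1.0, 3.0, 9)
print(smoothed_hinge(u, tau=0.5))
print(smoothed_hinge_grad(u, tau=0.5))
```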
|
| 208 |
+
|
| 209 |
+
Problem (9) can be solved using the proximal gradient descent (PGD) algorithm (Parikh and Boyd, 2014). Specifically, a matrix inverse is computed first:
|
| 210 |
+
|
| 211 |
+
$$
|
| 212 |
+
\mathbf {P} ^ {- 1} = \left(2 \lambda_ {l} \boldsymbol {\Sigma} + \frac {1}{n \tau} \boldsymbol {\Gamma} ^ {\prime} \boldsymbol {\Gamma}\right) ^ {- 1}. \tag {10}
|
| 213 |
+
$$
|
| 214 |
+
|
| 215 |
+
Then, for each $j = 0,1,\dots ,n$ , we update
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
\boldsymbol {\eta} ^ {[ - j ]} \leftarrow \boldsymbol {\eta} ^ {[ - j ]} - \mathbf {P} ^ {- 1} \left(\Gamma^ {\prime} \mathbf {z} ^ {(k)} + \frac {1}{n} \Gamma^ {\prime} \bar {\mathbf {y}} ^ {[ j ]} + 2 \lambda_ {l} \boldsymbol {\Sigma} \boldsymbol {\eta} ^ {[ - j ]}\right) \tag {11}
|
| 219 |
+
$$
|
| 220 |
+
|
| 221 |
+
until convergence, and then let $\hat{\boldsymbol{\eta}}_{\tau,l}^{[-j]} \gets \boldsymbol{\eta}^{[-j]}$. We call the above algorithm consolidated since the same matrix inverse $\mathbf{P}^{-1}$ obtained from equation (10) can be used in equation (11) for all $j$ (all folds). By avoiding the inversion of $n$ different matrices, the consolidated CV algorithm is much more efficient than the standard CV implementation. We also include warm starts, e.g., using $\hat{\boldsymbol{\eta}}_l$ to initialize $\hat{\boldsymbol{\eta}}_l^{[-j]}$ in problem (8), and Nesterov's acceleration to further speed up the algorithm.
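The computational point, a single factorization shared by all folds, can be sketched as follows. This is a generic numpy/scipy illustration, not the ccvsvm source: `Gamma`, `Sigma`, `z_k`, and `y_bar` are random placeholders standing in for the quantities appearing in update (11).

```python
import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(2)
n, n_s, lam, tau = 200, 40, 0.05, 0.1
Gamma = rng.normal(size=(n, n_s))                 # placeholder for the n x n_s matrix Gamma
A = rng.normal(size=(n_s, n_s))
Sigma = A @ A.T / n_s + 1e-6 * np.eye(n_s)        # placeholder for the n_s x n_s matrix Sigma

# Factor P = 2*lam*Sigma + Gamma'Gamma/(n*tau) once ...
P = 2.0 * lam * Sigma + Gamma.T @ Gamma / (n * tau)
P_factor = cho_factor(P)

# ... and reuse the same factorization in update (11) for every fold j.
eta = np.zeros(n_s)
for j in range(5):                                # a few illustrative folds
    z_k = rng.normal(size=n)                      # placeholder for the smoothed-loss gradient terms z^(k)
    y_bar = rng.choice([-1.0, 0.0, 1.0], size=n)  # placeholder for the crafted labels bar{y}^[j]
    rhs = Gamma.T @ z_k + Gamma.T @ y_bar / n + 2.0 * lam * Sigma @ eta
    eta = eta - cho_solve(P_factor, rhs)          # one back-substitution per step; no new inversion
```

Only the right-hand side changes across folds, which is the source of the claimed savings.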
|
| 222 |
+
|
| 223 |
+
We have just described the PGD algorithm for solving the smoothed SVM problem (9). Interestingly, the exact SVM solution of problem (8) can be obtained by iteratively solving problem (9) with $\tau_{1} > \tau_{2} > \ldots$, where $\tau_{1} = 1$ and $\tau_{k} = \tau_{k - 1} / 8$ for $k > 1$. The iteration reaches the exact solution of problem (8) in a finite number of steps, after a simple projection step. To conserve space, we omit the details and refer interested readers to Wang and Zou (2022).
|
| 224 |
+
|
| 225 |
+
# 3 Two-stage Consolidated CV for the General SVM Problems
|
| 226 |
+
|
| 227 |
+
The consolidated CV developed in Section 2 does not include the bias; however, the SVM without the bias may have lower prediction accuracy, which limits its use in certain applications. Although a regularized bias can be obtained by adding a constant feature to the data matrix, the standard practice for the SVM does not regularize the bias term. Thus our goal is to compute the SVM with the bias, namely, the general SVM problem. In this section, we extend the consolidated CV to handle the general SVM problems. Such an extension turns out to be non-trivial.
|
| 228 |
+
|
| 229 |
+
The general SVM problem is formulated as follows,
|
| 230 |
+
|
| 231 |
+
$$
|
| 232 |
+
\left(\hat{\beta}_{0,l}, \hat{\boldsymbol{\alpha}}_l\right) = \underset{\beta_0 \in \mathbb{R}, \boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \frac{1}{n} \sum_{i=1}^{n} \left[ 1 - y_i \left(\beta_0 + \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right) \right]_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha}, \tag{12}
|
| 233 |
+
$$
|
| 234 |
+
|
| 235 |
+
and the corresponding LOOCV problems are, for $j = 1,2,\dots ,n$,
|
| 236 |
+
|
| 237 |
+
$$
|
| 238 |
+
\left(\hat{\beta}_{0,l}^{[-j]}, \hat{\boldsymbol{\alpha}}_l^{[-j]}\right) = \underset{\beta_0 \in \mathbb{R}, \boldsymbol{\alpha} \in \mathbb{R}^{n}}{\operatorname{argmin}} \frac{1}{n} \sum_{i=1}^{n} \left[ 1 - \tilde{y}_i^{[j]} \left(\beta_0 + \mathbf{K}_i^{\prime} \boldsymbol{\alpha}\right) \right]_{+} + \lambda_l \boldsymbol{\alpha}^{\prime} \mathbf{K} \boldsymbol{\alpha}. \tag{13}
|
| 239 |
+
$$
|
| 240 |
+
|
| 241 |
+
For notational convenience, we let $\tilde{y}_i^{[0]} = y_i$ and let $(\hat{\beta}_{0,l},\hat{\alpha}_l) = (\hat{\beta}_{0,l}^{[-0]},\hat{\alpha}_l^{[-0]})$ , so we extend problem (13) with $j = 0$ to include the complete data problem (12) as a special case.
|
| 242 |
+
|
| 243 |
+
The key difficulty of developing the consolidated CV procedure for the general SVM problems is that $|\hat{\beta}_{0,l} - \hat{\beta}_{0,l}^{[-j]}|$ is hard to bound. To this end, we propose a two-stage consolidated CV procedure, where we give a consolidated bound of $|\hat{\beta}_{0,l} - \hat{\beta}_{0,l}^{[-j]}|$ for all $j$ in the first stage.
|
| 244 |
+
|
| 245 |
+
For $l > 1$ , suppose we have found the solutions of problems (12) and (13) with the tuning parameter $\lambda_{l-1}$ . Denote these solutions by $(\hat{\beta}_{0,l-1}, \hat{\alpha}_{l-1})$ and $(\hat{\beta}_{0,l-1}^{[-j]}, \hat{\alpha}_{l-1}^{[-j]})$ . In Lemma 3.1, for each $i$ , we give a consolidated bound of $y_i \mathbf{K}_i' \hat{\alpha}_i^{[-j]}$ for all $j = 0, 1, \ldots, n$ and $j \neq i$ .
|
| 246 |
+
|
| 247 |
+
Lemma 3.1. For each $i = 1,2,\ldots ,n$ define
|
| 248 |
+
|
| 249 |
+
$$
|
| 250 |
+
c_{i,l}^{+} = \max_{\substack{j = 0,1,\ldots ,n\\ j\neq i}}\left\{\frac{\lambda_{l - 1} + \lambda_{l}}{2\lambda_{l}} y_{i}\mathbf{K}_{i}^{\prime}\hat{\boldsymbol{\alpha}}_{l - 1}^{[-j]} + \frac{\lambda_{l - 1} - \lambda_{l}}{2\lambda_{l}}\sqrt{B}\sqrt{\hat{\boldsymbol{\alpha}}_{l - 1}^{[-j]^{\prime}}\mathbf{K}\hat{\boldsymbol{\alpha}}_{l - 1}^{[-j]}}\right\} ,
|
| 251 |
+
$$
|
| 252 |
+
|
| 253 |
+
$$
|
| 254 |
+
c _ {i, l} ^ {-} = \min _ {\substack {j = 0, 1, \dots , n \\ j \neq i}} \left\{\frac {\lambda_ {l - 1} + \lambda_ {l}}{2 \lambda_ {l}} y _ {i} \mathbf {K} _ {i} ^ {\prime} \hat {\boldsymbol {\alpha}} _ {l - 1} ^ {[ - j ]} - \frac {\lambda_ {l - 1} - \lambda_ {l}}{2 \lambda_ {l}} \sqrt {B} \sqrt {\hat {\boldsymbol {\alpha}} _ {l - 1} ^ {[ - j ] ^ {\prime}} \mathbf {K} \hat {\boldsymbol {\alpha}} _ {l - 1} ^ {[ - j ]}} \right\}, \tag{14}
|
| 255 |
+
$$
|
| 256 |
+
|
| 257 |
+
Algorithm 2 Bisection algorithm to find $\beta_{0,l}^{+}$ and $\beta_{0,l}^{-}$
|
| 258 |
+
|
| 259 |
+
Input: $\mathbf{y}, c_{i,l}^{-}, c_{i,l}^{+}, \epsilon = 10^{-7}$
|
| 260 |
+
|
| 261 |
+
1: Compute $B^{+}$ and $B^{-}$ as
|
| 262 |
+
|
| 263 |
+
$$
|
| 264 |
+
B ^ {+} = \max \left\{\max _ {\{i: y _ {i} = - 1 \}} \left\{c _ {i, l} ^ {+} - 1 \right\}, \max _ {\{i: y _ {i} = 1 \}} \left\{1 - c _ {i, l} ^ {-} \right\} \right\} + \epsilon ,
|
| 265 |
+
$$
|
| 266 |
+
|
| 267 |
+
$$
|
| 268 |
+
B ^ {-} = \min \left\{\min _ {\{i: y _ {i} = - 1 \}} \{c _ {i, l} ^ {-} - 1 \}, \min _ {\{i: y _ {i} = 1 \}} \{1 - c _ {i, l} ^ {+} \} \right\} - \epsilon .
|
| 269 |
+
$$
|
| 270 |
+
|
| 271 |
+
2: Let $a^+ \gets B^+, c^+ \gets B^-, b^+ \gets (a^+ + c^+) / 2$ .
|
| 272 |
+
3: repeat
|
| 273 |
+
4: Compute $\psi^{+}(b^{+})$
|
| 274 |
+
5: Let $a^+ \gets b^+$ and $b^+ \gets (b^+ + c^+) / 2$ if $\psi^+(b^+) < 0$ .
|
| 275 |
+
6: Let $c^+ \gets b^+$ and $b^+ \gets (a^+ + b^+) / 2$ if $\psi^+(b^+) \geq 0$ .
|
| 276 |
+
7: until $|a^{+} - c^{+}| < \epsilon$ .
|
| 277 |
+
8: Let $\beta_{0,l}^{+}\gets a^{+}$
|
| 278 |
+
9: Let $a^{-} \gets B^{+}$ , $c^{-} \gets B^{-}$ , $b^{-} \gets (a^{-} + c^{-}) / 2$ .
|
| 279 |
+
10: repeat
|
| 280 |
+
11: Compute $\psi^{-}(b^{-})$
|
| 281 |
+
12: Let $a^{-} \gets b^{-}$ and $b^{-} \gets (b^{-} + c^{-}) / 2$ if $\psi^{-}(b^{-}) \leq 0$.
|
| 282 |
+
13: Let $c^{-} \gets b^{-}$ and $b^{-} \gets (a^{-} + b^{-}) / 2$ if $\psi^{-}(b^{-}) > 0$.
|
| 283 |
+
14: until $|a^{-} - c^{-}| < \epsilon$ .
|
| 284 |
+
15: Let $\beta_{0,l}^{-}\gets c^{-}$
|
| 285 |
+
|
| 286 |
+
Output: $\beta_{0,l}^{+}$ and $\beta_{0,l}^{-}$
|
| 287 |
+
|
| 288 |
+
where $B = \max_{i}K(\mathbf{x}_{i},\mathbf{x}_{i})$ and $B = 1$ for the radial kernel. Then for any $i = 1,\dots ,n$ it holds that
|
| 289 |
+
|
| 290 |
+
$$
|
| 291 |
+
c _ {i, l} ^ {-} \leq y _ {i} \mathbf {K} _ {i} ^ {\prime} \hat {\boldsymbol {\alpha}} _ {l} ^ {[ - j ]} \leq c _ {i, l} ^ {+}, \forall j \neq i. \tag {15}
|
| 292 |
+
$$
|
| 293 |
+
|
| 294 |
+
On the basis of the bounds of $y_{i}\mathbf{K}_{i}^{\prime}\hat{\alpha}_{l}^{[-j]}$ given in Lemma 3.1, we next present Lemma 3.2 and Lemma 3.3 to give bounds of $\hat{\beta}_{0,l}^{[-j]}$ that are consolidated for all $j = 0,1,\dots ,n$ .
|
| 295 |
+
|
| 296 |
+
Lemma 3.2. With $c_{i,l}^{-}$ and $c_{i,l}^{+}$ from Lemma 3.1, for a given constant $b$ , define $S_1(b) = \{i : y_i b + c_{i,l}^{+} < 1\}$ and $S_2(b) = \{i : y_i b + c_{i,l}^{-} > 1\}$ . Let $n_+(b) = \sum_{i \in (S_1(b) \cup S_2(b))^C} I(y_i = 1)$ and $n_-(b) = \sum_{i \in (S_1(b) \cup S_2(b))^C} I(y_i = -1)$ . Define $\psi^+(b) = \sum_{i \in S_1(b)} y_i + n_+(b) + 1$ and $\psi^-(b) = \sum_{i \in S_1(b)} y_i - n_-(b) - 1$ . Then we have
|
| 297 |
+
|
| 298 |
+
(1) both $\psi^{+}(b)$ and $\psi^{-}(b)$ are non-increasing in $b$ ;
|
| 299 |
+
(2) $\psi^{+}(b) < 0$ implies $b > \hat{\beta}_{0,l}^{[-j]}$ for all $j = 0,1,\ldots ,n;$
|
| 300 |
+
(3) $\psi^{-}(b) > 0$ implies $b < \hat{\beta}_{0,l}^{[-j]}$ for all $j = 0,1,\ldots ,n$ .
|
| 301 |
+
|
| 302 |
+
Following Lemma 3.2, we develop a bisection algorithm, Algorithm 2, to give consolidated bounds for $\hat{\beta}_{0,l}^{[-j]}$ for all $j = 0,1,\ldots ,n$.
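A minimal numpy sketch of $\psi^{+}$ from Lemma 3.2 and of the bisection search for the upper bound $\beta_{0,l}^{+}$ in Algorithm 2 is given below; $\psi^{-}$ and the lower bound are symmetric. The helper names are ours, and the inputs $c_{i,l}^{-}$, $c_{i,l}^{+}$ are assumed to come from Lemma 3.1.

```python
import numpy as np

def psi_plus(b, y, c_minus, c_plus):
    """psi^+(b) from Lemma 3.2, given labels y and the per-sample bounds c^- and c^+."""
    S1 = (y * b + c_plus) < 1
    S2 = (y * b + c_minus) > 1
    rest = ~(S1 | S2)
    return y[S1].sum() + np.sum(rest & (y == 1)) + 1

def upper_bound_beta0(y, c_minus, c_plus, eps=1e-7):
    """Bisection of Algorithm 2 for beta_0^+: shrink [B^-, B^+] until psi^+ localizes the bound."""
    B_plus = max(np.max(c_plus[y == -1] - 1, initial=-np.inf),
                 np.max(1 - c_minus[y == 1], initial=-np.inf)) + eps
    B_minus = min(np.min(c_minus[y == -1] - 1, initial=np.inf),
                  np.min(1 - c_plus[y == 1], initial=np.inf)) - eps
    a, c = B_plus, B_minus
    b = (a + c) / 2.0
    while abs(a - c) >= eps:
        if psi_plus(b, y, c_minus, c_plus) < 0:
            a, b = b, (b + c) / 2.0          # b is already a valid upper bound; move a down
        else:
            c, b = b, (a + b) / 2.0          # otherwise move c up
    return a
```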
|
| 303 |
+
|
| 304 |
+
As shown in Lemma 3.3, Algorithm 2 yields consolidated bounds for $\hat{\beta}_{0,l}^{[-j]}$ for all $j = 0,1,\ldots ,n$.
|
| 305 |
+
|
| 306 |
+
Lemma 3.3. Suppose the input of Algorithm 2, $c_{i,l}^{-}$ and $c_{i,l}^{+}$, satisfies inequality (15); then the output of Algorithm 2, $\beta_{0,l}^{+}$ and $\beta_{0,l}^{-}$, satisfies
|
| 307 |
+
|
| 308 |
+
$$
|
| 309 |
+
\beta_{0,l}^{-} < \hat{\beta}_{0,l}^{[-j]} < \beta_{0,l}^{+}, \quad \forall j = 0, 1, \dots, n. \tag{16}
|
| 310 |
+
$$
|
| 311 |
+
|
| 312 |
+
It immediately follows from Lemma 3.3 that
|
| 313 |
+
|
| 314 |
+
$$
|
| 315 |
+
\left| \hat {\beta} _ {0, l} - \hat {\beta} _ {0, l} ^ {[ - j ]} \right| < \beta_ {0, l} ^ {+} - \beta_ {0, l} ^ {-}, \tag {17}
|
| 316 |
+
$$
|
| 317 |
+
|
| 318 |
+
for any $j$ , achieving the goal of the first stage.
|
| 319 |
+
|
| 320 |
+
We have constructed the bounds in inequalities (15) and (16). However, these bounds are too loose for developing data reduction rules in practice; the looseness is mainly caused by the maximum and minimum operators involved in equations (14). Therefore, in the second stage, we derive refined bounds, which are presented below.
|
| 321 |
+
|
| 322 |
+
Lemma 3.4. For each $i = 1,2,\ldots ,n$ define
|
| 323 |
+
|
| 324 |
+
$$
|
| 325 |
+
\tilde {c} _ {i, l} ^ {+} = \frac {\lambda_ {l - 1} + \lambda_ {l}}{2 \lambda_ {l}} y _ {i} \mathbf {K} _ {i} ^ {\prime} \hat {\boldsymbol {\alpha}} _ {l - 1} + \frac {\lambda_ {l - 1} - \lambda_ {l}}{2 \lambda_ {l}} \sqrt {B} \sqrt {\hat {\boldsymbol {\alpha}} _ {l - 1} ^ {\prime} \mathbf {K} \hat {\boldsymbol {\alpha}} _ {l - 1}} + \sqrt {\frac {B ^ {2}}{1 6 n ^ {2} \lambda_ {l} ^ {2}} + \frac {B (\beta_ {0 , l} ^ {+} - \beta_ {0 , l} ^ {-})}{2 n \lambda_ {l}}} + \frac {B}{4 n \lambda_ {l}},
|
| 326 |
+
$$
|
| 327 |
+
|
| 328 |
+
$$
|
| 329 |
+
\tilde {c} _ {i, l} ^ {-} = \frac {\lambda_ {l - 1} + \lambda_ {l}}{2 \lambda_ {l}} y _ {i} \mathbf {K} _ {i} ^ {\prime} \hat {\boldsymbol {\alpha}} _ {l - 1} - \frac {\lambda_ {l - 1} - \lambda_ {l}}{2 \lambda_ {l}} \sqrt {B} \sqrt {\hat {\boldsymbol {\alpha}} _ {l - 1} ^ {\prime} \mathbf {K} \hat {\boldsymbol {\alpha}} _ {l - 1}} - \sqrt {\frac {B ^ {2}}{1 6 n ^ {2} \lambda_ {l} ^ {2}} + \frac {B (\beta_ {0 , l} ^ {+} - \beta_ {0 , l} ^ {-})}{2 n \lambda_ {l}}} - \frac {B}{4 n \lambda_ {l}},
|
| 330 |
+
$$
|
| 331 |
+
|
| 332 |
+
where $\beta_{0,l}^{+}$ and $\beta_{0,l}^{-}$ are produced by Algorithm 2. Then it holds that
|
| 333 |
+
|
| 334 |
+
$$
|
| 335 |
+
\tilde{c}_{i,l}^{-} \leq y_i \mathbf{K}_i^{\prime} \hat{\boldsymbol{\alpha}}_l^{[-j]} \leq \tilde{c}_{i,l}^{+}, \quad \forall j = 0, 1, \dots, n \text{ and } j \neq i. \tag{18}
|
| 336 |
+
$$
|
| 337 |
+
|
| 338 |
+
Hence by Lemmata 3.1 and 3.4, we have
|
| 339 |
+
|
| 340 |
+
$$
|
| 341 |
+
\hat {c} _ {i, l} ^ {-} \equiv \max \left\{c _ {i, l} ^ {-}, \tilde {c} _ {i, l} ^ {-} \right\} \leq y _ {i} \mathbf {K} _ {i} ^ {\prime} \hat {\boldsymbol {\alpha}} _ {l} ^ {[ - j ]} \leq \min \left\{c _ {i, l} ^ {+}, \tilde {c} _ {i, l} ^ {+} \right\} \equiv \hat {c} _ {i, l} ^ {+}, \tag {19}
|
| 342 |
+
$$
|
| 343 |
+
|
| 344 |
+
for any $j = 0,1,\ldots ,n$ and $j\neq i$. We then use $\max \{c_{i,l}^{-},\tilde{c}_{i,l}^{-}\}$ and $\min \{c_{i,l}^{+},\tilde{c}_{i,l}^{+}\}$ as the input of the bisection algorithm to yield the output $\tilde{\beta}_{0,l}^{+}$ and $\tilde{\beta}_{0,l}^{-}$. By inequality (18) and Lemma 3.2, we have
|
| 345 |
+
|
| 346 |
+
$$
|
| 347 |
+
\tilde{\beta}_{0,l}^{-} < \hat{\beta}_{0,l}^{[-j]} < \tilde{\beta}_{0,l}^{+}, \quad \forall j = 0, 1, \dots, n. \tag{20}
|
| 348 |
+
$$
|
| 349 |
+
|
| 350 |
+
Therefore, we obtain inequalities (18) and (20), which are the refined counterparts of inequalities (15) and (16). Using the refined bounds, we now present the main theorem.
|
| 351 |
+
|
| 352 |
+
Theorem 3.5. The solution of problem (12), $\hat{\alpha}_l$ , satisfies:
|
| 353 |
+
|
| 354 |
+
$$
|
| 355 |
+
\hat{\alpha}_{i,l} = \left\{ \begin{array}{ll} \frac{y_i}{2 n \lambda_l}, & \text{if } i \in \tilde{\mathcal{L}}; \\ 0, & \text{if } i \in \tilde{\mathcal{R}}, \end{array} \right.
|
| 356 |
+
$$
|
| 357 |
+
|
| 358 |
+
and for any $j = 1,\ldots ,n$ , the solution of problem (13), $\hat{\alpha}_l^{[-j]}$ , satisfies:
|
| 359 |
+
|
| 360 |
+
$$
|
| 361 |
+
\hat{\alpha}_{i,l}^{[-j]} = \left\{ \begin{array}{ll} \frac{y_i}{2 n \lambda_l}, & \text{if } i \in \tilde{\mathcal{L}} \text{ and } i \neq j; \\ 0, & \text{if } i \in \tilde{\mathcal{R}} \text{ or } i = j, \end{array} \right.
|
| 362 |
+
$$
|
| 363 |
+
|
| 364 |
+
where $\hat{c}_{i,l}^{+}$ and $\hat{c}_{i,l}^{-}$ are given in inequality (19) and
|
| 365 |
+
|
| 366 |
+
$$
|
| 367 |
+
\tilde{\mathcal{L}} = \left\{i: y_i = 1 \text{ and } \tilde{\beta}_{0,l}^{+} + \hat{c}_{i,l}^{+} < 1 \right\} \cup \left\{i: y_i = -1 \text{ and } -\tilde{\beta}_{0,l}^{-} + \hat{c}_{i,l}^{+} < 1 \right\},
|
| 368 |
+
$$
|
| 369 |
+
|
| 370 |
+
$$
|
| 371 |
+
\tilde{\mathcal{R}} = \left\{i: y_i = 1 \text{ and } \tilde{\beta}_{0,l}^{-} + \hat{c}_{i,l}^{-} > 1 \right\} \cup \left\{i: y_i = -1 \text{ and } -\tilde{\beta}_{0,l}^{+} + \hat{c}_{i,l}^{-} > 1 \right\}.
|
| 372 |
+
$$
|
| 373 |
+
|
| 374 |
+
Thus by Theorem 3.5, problem (13) can be solved through some reduced-dimensional optimization problems, which are similar to problem (8) where the bias is excluded. Therefore, we can follow the discussions in Section 2.3 to employ the same PGD algorithm and the exact smoothing technique to obtain the exact solution for problem (13). Details of the algorithm are omitted to conserve space.
|
| 375 |
+
|
| 376 |
+
Table 1: Run time (in seconds), objective value, and test error of four kernel SVM solvers under mixture Gaussian distributed data with $p \in \{20,200\}$ and $n \in \{200,400,800,1600\}$. The test error is assessed on independently generated test data. The numbers are averages over 50 independent runs, with standard errors in parentheses.
|
| 377 |
+
|
| 378 |
+
<table><tr><td>p</td><td>n</td><td>method</td><td>time (s)</td><td>objective</td><td>test error</td><td>method</td><td>time (s)</td><td>objective</td><td>test error</td></tr><tr><td rowspan="8">20</td><td rowspan="2">200</td><td>ccvsvm</td><td>5.1</td><td>0.814 (.005)</td><td>0.351 (.007)</td><td>kernlab</td><td>73.4</td><td>0.814 (.005)</td><td>0.351 (.007)</td></tr><tr><td>magicsvm</td><td>7.7</td><td>0.814 (.005)</td><td>0.351 (.007)</td><td>LIBSVM</td><td>144.4</td><td>0.828 (.014)</td><td>0.351 (.007)</td></tr><tr><td rowspan="2">400</td><td>ccvsvm</td><td>44.2</td><td>0.827 (.003)</td><td>0.332 (.005)</td><td>kernlab</td><td>334.3</td><td>0.827 (.003)</td><td>0.332 (.005)</td></tr><tr><td>magicsvm</td><td>87.8</td><td>0.827 (.003)</td><td>0.332 (.005)</td><td>LIBSVM</td><td>879.7</td><td>0.827 (.003)</td><td>0.332 (.005)</td></tr><tr><td rowspan="2">800</td><td>ccvsvm</td><td>446.8</td><td>0.846 (.002)</td><td>0.309 (.002)</td><td>kernlab</td><td>2220.2</td><td>0.846 (.002)</td><td>0.309 (.002)</td></tr><tr><td>magicsvm</td><td>847.3</td><td>0.846 (.002)</td><td>0.309 (.002)</td><td>LIBSVM</td><td>6519.7</td><td>0.846 (.002)</td><td>0.310 (.002)</td></tr><tr><td rowspan="2">1600</td><td>ccvsvm</td><td>3829.5</td><td>0.853 (.001)</td><td>0.297 (.001)</td><td>kernlab</td><td>25530.5</td><td>0.853 (.001)</td><td>0.297 (.001)</td></tr><tr><td>magicsvm</td><td>7024.1</td><td>0.853 (.001)</td><td>0.297 (.001)</td><td>LIBSVM</td><td>63886.1</td><td>0.853 (.001)</td><td>0.297 (.001)</td></tr><tr><td rowspan="8">200</td><td rowspan="2">200</td><td>ccvsvm</td><td>6.8</td><td>0.780 (.006)</td><td>0.337 (.015)</td><td>kernlab</td><td>337.6</td><td>0.780 (.006)</td><td>0.339 (.015)</td></tr><tr><td>magicsvm</td><td>12.9</td><td>0.780 (.006)</td><td>0.337 (.015)</td><td>LIBSVM</td><td>932.5</td><td>0.780 (.006)</td><td>0.342 (.015)</td></tr><tr><td rowspan="2">400</td><td>ccvsvm</td><td>66.0</td><td>0.794 (.003)</td><td>0.366 (.015)</td><td>kernlab</td><td>2304.1</td><td>0.794 (.003)</td><td>0.366 (.015)</td></tr><tr><td>magicsvm</td><td>150.1</td><td>0.794 (.003)</td><td>0.366 (.015)</td><td>LIBSVM</td><td>6641.9</td><td>0.794 (.003)</td><td>0.368 (.015)</td></tr><tr><td rowspan="2">800</td><td>ccvsvm</td><td>530.4</td><td>0.811 (.001)</td><td>0.346 (.015)</td><td>kernlab</td><td>36771.4</td><td>0.811 (.001)</td><td>0.346 (.015)</td></tr><tr><td>magicsvm</td><td>996.1</td><td>0.811 (.001)</td><td>0.346 (.015)</td><td>LIBSVM</td><td>109365.5</td><td>0.811 (.001)</td><td>0.346 (.015)</td></tr><tr><td rowspan="2">1600</td><td>ccvsvm</td><td>5489.2</td><td>0.821 (.001)</td><td>0.322 (.013)</td><td>kernlab</td><td>461245.7</td><td>0.821 (.001)</td><td>0.322 (.013)</td></tr><tr><td>magicsvm</td><td>10803.9</td><td>0.821 (.001)</td><td>0.322 (.013)</td><td>LIBSVM</td><td>1436416.1</td><td>0.821 (.001)</td><td>0.322 (.013)</td></tr></table>
|
| 379 |
+
|
| 380 |
+
# 4 Numerical Studies
|
| 381 |
+
|
| 382 |
+
In this section, we demonstrate the computational advantages of fitting the kernel SVM using ccvsvm over the three other competitors, magicsvm, kernlab, and LIBSVM, with simulations and real data.
|
| 383 |
+
|
| 384 |
+
# 4.1 Simulations
|
| 385 |
+
|
| 386 |
+
A commonly used simulation setting with mixture Gaussian distributions (Hastie et al., 2009) is adopted. We generate mean vectors $\pmb{\mu}_{k_{+}}$ from $\mathrm{N}(\pmb{\mu}_{+},\mathbf{I}_p)$, $k = 1,2,\dots ,10$, where $\pmb{\mu}_{+} = (1,1,\dots ,1,0,0,\dots ,0)$ with half of the coordinates equal to zero. Each positive-class training sample is independently generated from $\mathrm{N}(\pmb{\mu}_{k_{+}},3^{2})$, where $k$ is drawn from the discrete uniform distribution on $\{1,2,\ldots ,10\}$. Using the same procedure, we obtain the negative-class training data from $\mathrm{N}(\pmb{\mu}_{k_{-}},3^{2})$, where $k$ is also uniform on $\{1,2,\dots ,10\}$ and $\pmb{\mu}_{-} = (0,0,\dots ,0,1,1,\dots ,1)$. For each combination of the feature dimension $p = 20$ or $200$ and the sample size $n = 200, 400, 800$, or $1600$, we fit the kernel SVM using the four solvers, ccvsvm, magicsvm, kernlab, and LIBSVM, to compute the entire solution paths at a sequence of 50 tuning parameters, uniformly spaced on the logarithmic scale between $e^{-6}$ and $e^{6}$. The radial kernel is used and the bandwidth is the default option of kernlab, which generally performs well. We compare the run time, objective function value, and test error of the four solvers, where the run time covers the whole computation process, including training and tuning the model. The objective function value is computed from equation (2). The test error is assessed on 10,000 test samples independently generated from the same distribution. Computations were conducted on an Intel(R) Xeon(R) Gold 6230 CPU @ 2.10 GHz.
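For reference, a sketch of this data-generating process is given below; it is our own transcription, the helper name is hypothetical, and we assume the component covariance is $3^2\mathbf{I}_p$ (the text writes $\mathrm{N}(\pmb{\mu}_{k},3^{2})$).

```python
import numpy as np

def make_mixture_data(n, p, seed=0):
    """Mixture-Gaussian example of Hastie et al. (2009) as described in Section 4.1."""
    rng = np.random.default_rng(seed)
    mu_plus = np.r_[np.ones(p // 2), np.zeros(p - p // 2)]
    mu_minus = np.r_[np.zeros(p // 2), np.ones(p - p // 2)]
    centers_plus = rng.normal(loc=mu_plus, scale=1.0, size=(10, p))    # mu_{k+} ~ N(mu_+, I_p)
    centers_minus = rng.normal(loc=mu_minus, scale=1.0, size=(10, p))  # mu_{k-} ~ N(mu_-, I_p)
    X, y = [], []
    for cls, centers in ((1, centers_plus), (-1, centers_minus)):
        for _ in range(n // 2):
            k = rng.integers(10)                                       # component drawn uniformly
            X.append(rng.normal(loc=centers[k], scale=3.0))            # assumed covariance 9 * I_p
            y.append(cls)
    return np.array(X), np.array(y)

X, y = make_mixture_data(n=200, p=20)
print(X.shape, np.bincount((y + 1) // 2))   # feature matrix shape and class counts
```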
|
| 387 |
+
|
| 388 |
+
Table 1 shows that, to reach the same objective value and test error, our ccvsvm algorithm is roughly twice as fast as magicsvm and about an order of magnitude faster than kernlab and LIBSVM. In addition, we observe that kernlab and LIBSVM slow down significantly as $p$ increases, e.g., LIBSVM is about 20 times slower when $p$ grows from 20 to 200, whereas the speed of ccvsvm and magicsvm is quite insensitive to the dimension. Remarkably, for $p = 200$ and $n = 1600$, our ccvsvm algorithm finishes training and tuning the SVM with LOOCV within two hours, while, at the same accuracy, LIBSVM spends about 400 hours, lasting over 16 days.
|
| 389 |
+
|
| 390 |
+
We exemplify the effect of data reduction using the simulation data with $n = 800$ and $p = 20$ by profiling the execution time for training and tuning the SVM at $\lambda = 0.1$. We observe that magicsvm took 0.12 seconds for matrix inversions and 11.17 seconds for LOOCV through problem (4), whereas ccvsvm spent 0.03 seconds on matrix inversions and 5.81 seconds on LOOCV via problem (8). The advantage of ccvsvm over magicsvm is mainly attributed to the reduced dimension of problem (8) compared with problem (4).
|
| 391 |
+
|
| 392 |
+
Table 2: Run time (in seconds) of four SVM solvers for benchmark data, averaged over 50 runs.
|
| 393 |
+
|
| 394 |
+
<table><tr><td>data</td><td>n</td><td>p</td><td>ccvsvm</td><td>magicsvm</td><td>kernlab</td><td>LIBSVM</td></tr><tr><td>arrhythmia</td><td>452</td><td>191</td><td>48.076</td><td>113.099</td><td>1554.579</td><td>5061.881</td></tr><tr><td>australian</td><td>690</td><td>14</td><td>202.863</td><td>412.323</td><td>902.463</td><td>2178.644</td></tr><tr><td>chess</td><td>3196</td><td>37</td><td>21768.612</td><td>38942.348</td><td>>240 hours</td><td>>240 hours</td></tr><tr><td>heart</td><td>270</td><td>13</td><td>8.464</td><td>16.466</td><td>89.373</td><td>168.477</td></tr><tr><td>leuk</td><td>72</td><td>7218</td><td>0.464</td><td>0.828</td><td>1548.724</td><td>4811.612</td></tr><tr><td>malaria</td><td>71</td><td>22283</td><td>0.504</td><td>0.819</td><td>4804.442</td><td>13835.143</td></tr><tr><td>musk</td><td>476</td><td>166</td><td>62.169</td><td>127.656</td><td>1563.262</td><td>4778.779</td></tr><tr><td>sonar</td><td>208</td><td>60</td><td>4.736</td><td>7.033</td><td>98.080</td><td>221.505</td></tr><tr><td>valley</td><td>606</td><td>100</td><td>149.034</td><td>311.147</td><td>2230.010</td><td>6428.014</td></tr></table>
|
| 395 |
+
|
| 396 |
+
# 4.2 Benchmark Data Applications
|
| 397 |
+
|
| 398 |
+
We also test the performance on benchmark data. We study nine commonly used real data sets from the UCI machine learning repository (Dua and Graff, 2017). The sample sizes range from 208 to 3,196. Two high-dimensional data sets with $p = 7{,}218$ and $22{,}283$ features are included. Each data set is split into a training set and a test set with ratio $9:1$. The kernel SVM is trained and tuned by the four solvers on the training set, and the test error is assessed on the test set. We adopt the $9:1$ training-test split so that most of the samples go to the training set and the computation time can be evaluated on relatively large training data.
|
| 399 |
+
|
| 400 |
+
Table 2 shows the timing comparisons, where ccvsvm is clearly the fastest. It is about twice as fast as magicsvm and significantly faster than kernlab and LIBSVM. In particular, for the two high-dimensional examples, magicsvm is thousands or even tens of thousands of times faster than kernlab and LIBSVM, and ccvsvm further cuts the run time of magicsvm in half. Similar to the simulations, all four kernel SVM solvers deliver almost the same objective values and test errors on the real data applications; due to space limitations, the accuracy results are omitted.
|
| 401 |
+
|
| 402 |
+
# 5 Discussions and Extensions
|
| 403 |
+
|
| 404 |
+
In this work, we have introduced a consolidated CV procedure and developed an algorithm called ccvsvm for the kernel SVM, one of the most successful classifiers. Our work is built on the recently proposed leave-one-out lemma and the magicsvm algorithm: the ccvsvm algorithm can even double the speed of magicsvm, which has already shown remarkable computational advantages over the mainstream SVM solvers kernlab and LIBSVM.
|
| 405 |
+
|
| 406 |
+
Scaling ccvsvm to large data sets. For large-scale data, we suggest incorporating kernel approximation into the consolidated CV algorithm. Specifically, random features (Rahimi and Recht, 2007) or Nyström subsampling (Rudi et al., 2015) can be applied in the exact leave-one-out formula of the SVM to find a low-cost approximation of the kernel matrix. Integrating these approximation techniques into our methods can further improve the numerical performance. These strategies can also improve generalization since they induce a form of implicit computational regularization. In the supplemental materials (Section C), we develop consolidated CV methods with kernel approximation, essentially converting the original consolidated kernel SVM to a consolidated linear SVM, which can then be efficiently solved by the proposed ccvsvm algorithm. As a quick demonstration, we consider the simulation example in Section 4.1 with $p = 20$ and increase $n$ to 5,000,000. Averaged over 50 runs, the SVM with random features can be rapidly trained and tuned in 831 seconds, giving test error 0.286, which is close to the Bayes error of 0.260. The computation time is only 15.7 seconds when $n = 100,000$. However, when $n$ is 800, the test error of the SVM with random features is 0.351, well above 0.309, the test error of our exact kernel SVM solver given in Table 1. We leave a full investigation of this strategy to future work.
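To illustrate the random-features idea, here is a standard random Fourier features sketch for the radial kernel in the sense of Rahimi and Recht (2007); it is not the implementation from the supplemental materials, and the helper name, bandwidth, and feature dimension are arbitrary choices for demonstration.

```python
import numpy as np

def random_fourier_features(X, sigma, D, seed=0):
    """Feature map z(x) with E[z(x)' z(x')] = exp(-sigma * ||x - x'||^2).

    The radial kernel has spectral density N(0, 2*sigma*I), so the random
    frequencies are drawn from that Gaussian (Rahimi and Recht, 2007)."""
    rng = np.random.default_rng(seed)
    p = X.shape[1]
    W = rng.normal(scale=np.sqrt(2.0 * sigma), size=(p, D))
    b = rng.uniform(0.0, 2.0 * np.pi, size=D)
    return np.sqrt(2.0 / D) * np.cos(X @ W + b)

rng = np.random.default_rng(3)
X = rng.normal(size=(6, 20))
Z = random_fourier_features(X, sigma=0.05, D=20000)
sq = ((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=-1)
print(np.max(np.abs(Z @ Z.T - np.exp(-0.05 * sq))))   # small approximation error, shrinking with D
```

With such features, the kernel SVM reduces to a linear SVM in the feature space, which is the conversion referred to above.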
|
| 407 |
+
|
| 408 |
+
Limitation. The proposed method is designed for LOOCV and the SVM since it utilizes the special structure of support vectors. In future work, it would be interesting to explore whether the consolidated CV can be generalized to $K$-fold CV or hold-out validation more broadly. It would also be interesting to extend the idea of consolidated CV to compute the solution paths of other computationally expensive machine learning methods such as support vector regression and kernel quantile regression.
|
| 409 |
+
|
| 410 |
+
Societal impact. This work does not present any foreseeable societal consequence.
|
| 411 |
+
|
| 412 |
+
# References
|
| 413 |
+
|
| 414 |
+
ARLOT, S. and CELISSE, A. (2010). A survey of cross-validation procedures for model selection. Statistics Surveys 4 40-79.
|
| 415 |
+
BENGIO, Y. and GRANDVALET, Y. (2004). No unbiased estimator of the variance of $k$ -fold cross-validation. Journal of Machine Learning Research 5 1089-1105.
|
| 416 |
+
BURMAN, P. (1989). A comparative study of ordinary cross-validation, $v$ -fold cross-validation and the repeated learning-testing methods. Biometrika 76 503-514.
|
| 417 |
+
CHANG, C.-C. and LIN, C.-J. (2011). Libsvm: A library for support vector machines. ACM Transactions on Intelligent Systems and Technology (TIST) 2 1-27.
|
| 418 |
+
CORTES, C. and VAPNIK, V. (1995). Support-vector networks. Machine Learning 20 273-297.
|
| 419 |
+
DUA, D. and GRAFF, C. (2017). UCI machine learning repository. URL http://archive.ics.uci.edu/ml
|
| 420 |
+
GHAOUI, L. E., VIALLON, V. and RABBANI, T. (2010). Safe feature elimination for the lasso and sparse supervised learning problems. arXiv preprint arXiv:1009.4219.
|
| 421 |
+
GOLUB, G. H., HEATH, M. and WAHBA, G. (1979). Generalized cross-validation as a method for choosing a good ridge parameter. Technometrics 21 215-223.
|
| 422 |
+
HASTIE, T., TIBSHIRANI, R. and FRIEDMAN, J. (2009). The Elements of Statistical Learning. Springer Series in Statistics, Springer New York Inc., New York, NY, USA.
|
| 423 |
+
HONG, B., ZHANG, W., LIU, W., YE, J., CAI, D., HE, X. and WANG, J. (2019). Scaling up sparse support vector machines by simultaneous feature and sample reduction. Journal of Machine Learning Research 20 1-39.
|
| 424 |
+
KARATZOGLOU, A., SMOLA, A., HORNIK, K. and ZEILEIS, A. (2004). kernlab-an S4 package for kernel methods in R. Journal of Statistical Software 11 1-20.
|
| 425 |
+
KOHAVI, R. (1995). A study of cross-validation and bootstrap for accuracy estimation and model selection. In International Joint Conference on Artificial Intelligence, vol. 14.
|
| 426 |
+
MICCHELLI, C. A., XU, Y. and ZHANG, H. (2006). Universal kernels. Journal of Machine Learning Research 7 2651-2667.
|
| 427 |
+
MOLINARO, A. M., SIMON, R. and PFEIFFER, R. M. (2005). Prediction error estimation: a comparison of resampling methods. Bioinformatics 21 3301-3307.
|
| 428 |
+
OGAWA, K., SUZUKI, Y. and TAKEUCHI, I. (2013). Safe screening of non-support vectors in pathwise SVM computation. In International Conference on Machine Learning.
|
| 429 |
+
PAN, X. and XU, Y. (2018). A novel and safe two-stage screening method for support vector machine. IEEE Transactions on Neural Networks and Learning Systems 30 2263-2274.
|
| 430 |
+
PARIKH, N. and BOYD, S. (2014). Proximal algorithms. Foundations and Trends in Optimization 1 127-239.
|
| 431 |
+
RAHIMI, A. and RECHT, B. (2007). Random features for large-scale kernel machines. In Advances in Neural Information Processing Systems, vol. 20.
|
| 432 |
+
RUDI, A., CAMORIANO, R. and ROSASCO, L. (2015). Less is more: Nyström computational regularization. In Advances in Neural Information Processing Systems, vol. 28.
|
| 433 |
+
STEINWART, I. (2001). On the influence of the kernel on the consistency of support vector machines. Journal of Machine Learning Research 2 67-93.
|
| 434 |
+
TIBSHIRANI, R. (1996). Regression shrinkage and selection via the lasso. Journal of the Royal Statistical Society: Series B (Methodological) 58 267-288.
|
| 435 |
+
VAPNIK, V. (1995). The Nature of Statistical Learning Theory. Springer Science & Business Media.
|
| 436 |
+
VAPNIK, V. (1998). Statistical Learning Theory. Wiley.
|
| 437 |
+
WAHBA, G. (1990). Spline Models for Observational Data, vol. 59. SIAM.
|
| 438 |
+
|
| 439 |
+
WAHBA, G., LIN, Y. and ZHANG, H. (1999). GACV for support vector machines. In Advances in Neural Information Processing Systems, vol. 12.
|
| 440 |
+
WAHBA, G. and WOLD, S. (1975). Periodic splines for spectral density estimation: The use of cross validation for determining the degree of smoothing. Communications in Statistics-Theory and Methods 4 125-141.
|
| 441 |
+
WANG, B. and ZOU, H. (2021). Honest leave-one-out cross-validation for estimating post-tuning generalization error. Stat 10.
|
| 442 |
+
WANG, B. and ZOU, H. (2022). Fast and exact leave-one-out analysis of large-margin classifiers. Technometrics 64 291-298.
|
| 443 |
+
WANG, J., WONKA, P. and YE, J. (2014). Scaling SVM and least absolute deviations via exact data reduction. In International Conference on Machine Learning.
|
| 444 |
+
ZHANG, Y. and YANG, Y. (2015). Cross-validation for selecting a model selection procedure. Journal of Econometrics 187 95-112.
|
| 445 |
+
|
| 446 |
+
# Checklist
|
| 447 |
+
|
| 448 |
+
1. For all authors...
|
| 449 |
+
|
| 450 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 451 |
+
(b) Did you describe the limitations of your work? [Yes]
|
| 452 |
+
(c) Did you discuss any potential negative societal impacts of your work? [Yes] We wrote "This work does not present any foreseeable societal consequence."
|
| 453 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 454 |
+
|
| 455 |
+
2. If you are including theoretical results...
|
| 456 |
+
|
| 457 |
+
(a) Did you state the full set of assumptions of all theoretical results? [Yes]
|
| 458 |
+
(b) Did you include complete proofs of all theoretical results? [Yes]
|
| 459 |
+
|
| 460 |
+
3. If you ran experiments...
|
| 461 |
+
|
| 462 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] The code and data are in the supplemental materials.
|
| 463 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
|
| 464 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes]
|
| 465 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes]
|
| 466 |
+
|
| 467 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 468 |
+
|
| 469 |
+
(a) If your work uses existing assets, did you cite the creators? [N/A]
|
| 470 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 471 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
|
| 472 |
+
|
| 473 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 474 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 475 |
+
|
| 476 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 477 |
+
|
| 478 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 479 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 480 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c12f6985e0bc5f33c85931fde915cc87704cd5c8b198eb8ca6d73abe1df2ca28
|
| 3 |
+
size 460296
|
aconsolidatedcrossvalidationalgorithmforsupportvectormachinesviadatareduction/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a9b2e20dae655cdbed7362a62314abdd0021bb5aa95d8b8fc6e02f3f4551f74b
|
| 3 |
+
size 611852
|
acontinuoustimeframeworkfordiscretedenoisingmodels/bffa17a7-7231-490e-b95b-b193fd9d4377_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d53532790a79a97be62bcf2525ed578a694fd6238d3782767e557195edde9457
|
| 3 |
+
size 87951
|
acontinuoustimeframeworkfordiscretedenoisingmodels/bffa17a7-7231-490e-b95b-b193fd9d4377_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:305acb29eb906f7a45b5231fd81bed3491bd2d9ce2bc854f16a1dc7d59a07e9b
|
| 3 |
+
size 107638
|