Add Batch d32009b1-c7ec-4f95-b796-23a88c25d595
This view is limited to 50 files because it contains too many changes. See raw diff.
- a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_content_list.json +3 -0
- a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_model.json +3 -0
- a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_origin.pdf +3 -0
- a2efficientautomatedattackerforboostingadversarialtraining/full.md +340 -0
- a2efficientautomatedattackerforboostingadversarialtraining/images.zip +3 -0
- a2efficientautomatedattackerforboostingadversarialtraining/layout.json +3 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_content_list.json +3 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_model.json +3 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_origin.pdf +3 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/full.md +455 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/images.zip +3 -0
- acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/layout.json +3 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_content_list.json +3 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_model.json +3 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_origin.pdf +3 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/full.md +513 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/images.zip +3 -0
- acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/layout.json +3 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_content_list.json +3 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_model.json +3 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_origin.pdf +3 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/full.md +398 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/images.zip +3 -0
- acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/layout.json +3 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_content_list.json +3 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_model.json +3 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_origin.pdf +3 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/full.md +339 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/images.zip +3 -0
- acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/layout.json +3 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_content_list.json +3 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_model.json +3 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_origin.pdf +3 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/full.md +311 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/images.zip +3 -0
- acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/layout.json +3 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_content_list.json +3 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_model.json +3 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_origin.pdf +3 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/full.md +359 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/images.zip +3 -0
- acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/layout.json +3 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_content_list.json +3 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_model.json +3 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_origin.pdf +3 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/full.md +278 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/images.zip +3 -0
- acceleratingsparseconvolutionwithcolumnvectorwisesparsity/layout.json +3 -0
- accelerationindistributedsparseregression/cc8497c6-8d71-4264-9b32-61dc9f402bc3_content_list.json +3 -0
- accelerationindistributedsparseregression/cc8497c6-8d71-4264-9b32-61dc9f402bc3_model.json +3 -0

a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c80cca20b207d481b64454bfbc1bf3e152249f36664c00219c6487007d938db
+size 72997

a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:22f9038d5787a82985197c53b9cc019e5a6de0062e0bfd44ea8b979d46f507c8
+size 91587

a2efficientautomatedattackerforboostingadversarialtraining/8d9183a3-13f1-42dd-a8cf-24f5895ba2bf_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1825bb0eb77e9a013a0b6c4099c2dbeca9272eb5af65030a6c4d7c1d85c24073
+size 897424

a2efficientautomatedattackerforboostingadversarialtraining/full.md
ADDED
@@ -0,0 +1,340 @@

# $\mathbf{A}^{2}$: Efficient Automated Attacker for Boosting Adversarial Training

Zhuoer Xu$^{1,2}$, Guanghui Zhu$^{1*}$, Changhua Meng$^{2}$, Shiwen Cui$^{2}$, Zhenzhe Ying$^{2}$, Weiqiang Wang$^{2}$, Ming Gu$^{2}$, Yihua Huang$^{1}$

$^{1}$State Key Laboratory for Novel Software Technology, Nanjing University; $^{2}$Tiansuan Lab, Ant Group

zhuoer.xu@smail.nju.edu.cn, zgh@nju.edu.cn, {changhua.mch, donn.csw, zhenzhe.yzz, weiqiang.wwq, guming.mg}@antgroup.com, yhuang@nju.edu.cn

# Abstract

AT (Adversarial Training) significantly improves model robustness, and various variants have been proposed to further boost its performance. Well-recognized methods focus on different components of AT (e.g., designing loss functions and leveraging additional unlabeled data). It is generally accepted that stronger perturbations yield more robust models; however, how to generate stronger perturbations efficiently remains an open problem. In this paper, we propose an efficient automated attacker called $\mathrm{A}^2$ to boost AT by generating optimal perturbations on-the-fly during training. $\mathrm{A}^2$ is a parameterized automated attacker that searches the attacker space for the best attacker against the current defense model and examples. Extensive experiments across different datasets demonstrate that $\mathrm{A}^2$ generates stronger perturbations with low extra cost and reliably improves the robustness of various AT methods against different attacks.

# 1 Introduction

DNNs (Deep Neural Networks) are extremely vulnerable to imperceptible perturbations despite their success in a wide variety of applications [He et al., 2016, Kenton and Toutanova, 2019, Guo et al., 2017]. In particular, adding small but carefully chosen deviations to the input, called adversarial perturbations, can cause DNNs to make incorrect predictions with high confidence. This indicates that models trained by minimizing the empirical risk alone are not intrinsically robust. To explicitly improve robustness, AT (Adversarial Training), where a defense model is trained on worst-case adversarial perturbations generated by an attacker, was developed and has proved effective.

Building on this significant improvement in robustness, various methods have been proposed that focus on different components of AT: analyzing the robustness of neural architectures [Zagoruyko and Komodakis, 2016], designing loss functions such as TRADES [Zhang et al., 2019] and MART [Wang et al., 2019], perturbing the model to flatten the loss landscape (i.e., AWP [Wu et al., 2020]), and leveraging unlabeled data (i.e., RST [Carmon et al., 2019]). Gowal et al. [2020] compare combinations of most of these components and achieve the SOTA (state-of-the-art) performance.

All the above AT methods use $\mathrm{PGD}^K$ [Madry et al., 2018], a $K$-step stack of FGSM [Goodfellow et al., 2015], as the attacker to generate perturbations for each example against the defense model. Since perturbation generation is the key of AT, stronger perturbations yield more robust models. However, there is a trade-off between the strength of the perturbation and the training efficiency: increasing the attack step $K$ strengthens the perturbations, but linearly increases the training overhead [Gowal et al., 2020].

Likewise, the huge overhead prevents the use of SOTA adversarial attackers [Croce and Hein, 2020, Yao et al., 2021]. To balance robustness and efficiency, manually tuning the attacker (e.g., the step size and the attack method in each step) is of great interest. R+FGSM [Wong et al., 2019] first randomly initializes a small perturbation and then applies FGSM with a tuned step size. Surprisingly, AT with R+FGSM is as effective as $\mathrm{PGD}^K$ at a significantly lower cost.

However, given a novel dataset, tuning the attacker manually is a challenging task that requires expert knowledge. Moreover, the best attacker is specific to each example and to the current model during adversarial training. Manual coarse-grained tuning for the whole training run (e.g., a fixed attack method and step size for all examples) is sub-optimal and prevents further improvement of robustness.

Inspired by AutoML (Automated Machine Learning [Zoph and Le, 2017, Liu et al., 2018]), we propose an efficient automated attacker called $\mathrm{A}^2$ to boost AT. $\mathrm{A}^2$ is a parameterized attacker that automatically tunes itself on-the-fly during training to generate worst-case perturbations for each example. First, we design a general attacker space by referring to existing attackers. The attacker space is a stack of one-step attacker cells, each consisting of a perturbation block and a step size block. Then, we employ a parameterized attacker to search for operations in each block and construct, for each example, the attacker that maximizes the model loss. Specifically, we leverage the attention mechanism to calculate the score of each operation. For continuous operations, we sum the operations with the normalized scores as weights. For discrete operations, we use the reparameterization trick [Jang et al., 2017] to sample an operation from the corresponding block. In this way, the constructed attacker generates worst-case adversarial perturbations to train the defense model. Meanwhile, $\mathrm{A}^2$ is differentiable and can be optimized with respect to the model loss by gradient descent.

We conduct extensive experiments to verify the effectiveness and efficiency of $\mathrm{A}^2$. Compared with PGD, $\mathrm{A}^2$ finds better perturbations for models trained with various AT methods; the results show that 20-step $\mathrm{A}^2$ generates better perturbations than $\mathrm{PGD}^{100}$. Moreover, when we combine $\mathrm{A}^2$ with other AT variants, the robustness of models with different architectures on various datasets is consistently improved under strong attacks (e.g., the classical C&W and the SOTA AutoAttack). We also show that $\mathrm{A}^2$ is insensitive to its hyperparameters.

To summarize, our main contributions can be highlighted as follows:

- We propose an efficient automated attacker called $\mathrm{A}^2$, which generates worst-case perturbations on-the-fly during training to improve robustness.
- In $\mathrm{A}^2$, we design an attacker space by summarizing existing attackers and employ a differentiable method to construct the most adversarial attacker for each example based on the attention mechanism.
- Extensive experimental results across different datasets and neural architectures demonstrate that $\mathrm{A}^2$ improves model robustness by generating stronger perturbations in the inner maximization. Moreover, $\mathrm{A}^2$ can be flexibly combined with different AT methods, showing good generality.

# 2 Preliminary: Adversarial Training

Let $D = (\mathbf{X}, Y) = \{(\mathbf{x}_i, y_i)\}_{i=1}^n$ be a dataset with $\mathbf{x}_i \in \mathbb{R}^d$ a natural example and $y_i \in \{1, \dots, C\}$ its associated label. We measure the performance of a DNN classifier $f$ parameterized by $\theta$ with a suitable loss function $l$, denoted $\mathbb{E}_{(\mathbf{x}_i, y_i) \in D} [l(f_\theta(\mathbf{x}_i), y_i)]$. AT [Madry et al., 2018] formulates a saddle point problem whose goal is to find the model parameters $\theta$ that minimize the adversarial risk in the outer minimization (the example's index $i$ is omitted for brevity):

$$
\underbrace{\min_{\theta}\, \mathbb{E}_{(\mathbf{x}, y) \in D} \overbrace{\left[ \max_{\delta \in \mathbb{S}} l\left(f_{\theta}(\mathbf{x} + \delta), y\right) \right]}^{\text{inner maximization}}}_{\text{outer minimization}} \tag{1}
$$

where $\mathbb{S}$ defines the set of allowed perturbations. The perturbation is usually constrained by an $L_{p}$ norm with bound $\epsilon$, i.e., $\mathbb{S} = \{\delta : \|\delta\|_p \leq \epsilon\}$.

The inner maximization aims to find an adversarial perturbation of the example that achieves a high loss for the defense model. However, finding the optimum of the inner maximization is NP-hard. Various gradient-based attackers have been proposed to approximate its solution; we classify them by the number of gradient-ascent steps. One-step attackers [Goodfellow et al., 2015, Miyato et al., 2017] generate adversarial perturbations as:

$$
\delta^{*} \approx \Pi_{\mathbb{S}}\, \eta \cdot \psi(\nabla_{\mathbf{x}}) \tag{2}
$$

where $\nabla_{\mathbf{x}}$ is short for $\nabla_{\mathbf{x}} l(f_{\theta}(\mathbf{x}), y)$, $\eta$ is the step size, $\psi$ is a transformation function (e.g., $\mathrm{sgn}$ in FGSM and the identity in FGM), and $\Pi$ is the projection. Since such linearization attacks tend to be trapped in the non-smooth vicinity of the data point, R+FGSM initializes a small random perturbation to escape the vicinity and then applies FGSM. As a typical multi-step attacker, $\mathrm{PGD}^K$ [Madry et al., 2018] finds better perturbations by $K$ steps of gradient ascent:

$$
\mathbf{x}^{(k)} = \Pi_{\mathbf{x} + \mathbb{S}} \left(\mathbf{x}^{(k-1)} + \eta \cdot \psi(\nabla_{\mathbf{x}^{(k-1)}})\right) \tag{3}
$$

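To make Equation (3) concrete, the following is a minimal PyTorch-style sketch of $\mathrm{PGD}^K$ under the $L_\infty$ threat model with a random start; the function and argument names are our own assumptions, not taken from any released code.

```python
import torch

def pgd_attack(model, loss_fn, x, y, eps=8/255, eta=2/255, steps=10):
    """Sketch of PGD^K (Eq. 3): K sign-gradient ascent steps, each projected
    back into the L-infinity ball of radius eps around the natural example x."""
    delta = torch.empty_like(x).uniform_(-eps, eps)  # random start
    for _ in range(steps):
        delta.requires_grad_(True)
        loss = loss_fn(model(x + delta), y)
        grad = torch.autograd.grad(loss, delta)[0]
        with torch.no_grad():
            delta = delta + eta * grad.sign()    # psi = sgn, as in FGSM
            delta = delta.clamp(-eps, eps)       # projection onto S
            delta = (x + delta).clamp(0, 1) - x  # keep a valid image in [0, 1]
    return (x + delta).detach()
```
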
# 3 Methodology

# 3.1 Motivation

The key of AT is generating perturbations in the inner maximization: strong perturbations help improve robustness. It is generally accepted that the number of steps $K$ used to solve the inner maximization correlates with the attacker's ability to generate stronger perturbations. However, a larger $K$ leads to a linear increase in training overhead. Wong et al. [2019] suggest that, with appropriate step size tuning and early stopping, one-step attackers yield models whose robustness is comparable to that obtained with much more expensive multi-step attackers. This indicates that hyperparameters such as random initialization, step size, momentum, and early stopping all affect perturbation generation. From the perspective of both effectiveness and efficiency, it is therefore valuable to further improve robustness by tuning the attacker to strengthen perturbations.

However, manually tuning perturbation generation for each example on-the-fly during training is impractical. To address this problem, we propose an efficient automated attacker that boosts adversarial training by generating optimal perturbations on-the-fly.

# 3.2 Problem Formulation

Inspired by AutoML, we first design a general attacker space $\mathcal{A}$ by referring to existing attackers. Then, we employ an automated attacker parameterized by $\alpha$ to search in $\mathcal{A}$ and construct an attacker against the example and the defense model $(\mathbf{x}, y, f_{\theta})$. We abbreviate the perturbation generated by the constructed attacker as $\delta_{\alpha}$. The goal of $\mathrm{A}^2$ is thus to train a robust model using the perturbations generated by the constructed attackers, via the bilevel optimization problem:

$$
\min_{\theta} \mathbb{E}_{(\mathbf{x}, y) \in D} \left[ l(f_{\theta}(\mathbf{x} + \delta_{\alpha^{*}}), y) \right]
$$

$$
\text{s.t.} \quad \alpha^{*} = \underset{\alpha}{\arg\max}\; \mathbb{E}_{(\mathbf{x}, y) \in D} \left[ l(f_{\theta}(\mathbf{x} + \delta_{\alpha}), y) \right] \tag{4}
$$

On the attack side, we train $\alpha$ by SGD to make the defense model misclassify. On the defense side, we use $\alpha^{*}$ to construct the best attacker for each example and then generate perturbations to adversarially train the defense model.

# 3.3 Attacker Space

Revisiting most attackers, we find that an attacker can be viewed as a stack of one-step attackers, each consisting of an attack method and a step size. Thus, as shown in Figure 1(a), we design a general attacker space $\mathcal{A}$ consisting of $K$ one-step cells. The $k$-th cell, denoted $C^{(k)}$, is a one-step attacker consisting of the following two blocks:

Perturbation Block $O_p$. Typical attack methods (i.e., FGM and FGSM), attack methods with momentum (i.e., FGMM and FGSMM), random perturbations (i.e., Gaussian and Uniform), and the special Identity, which enables the attacker to stop early at a certain step, as in FAT [Zhang et al., 2020].

Step Size Block $O_{s}$. $\{10^{-4} \cdot \eta, 10^{-3} \cdot \eta, 10^{-2} \cdot \eta, 10^{-1} \cdot \eta, \eta\}$, where $\eta$ is a hyperparameter related to the space of allowed perturbations $\mathbb{S}$.

Figure 1: (a) Attacker space of $\mathbf{A}^2$; (b) $\mathrm{PGD^{10}}$: FGSM and a fixed step size 2/255 in each cell; (c) R+FGSM: Gaussian in the first cell, FGSM in the second cell, and Identity in the other cells.

Each block $O$ contains multiple operations. Let $o(\cdot)$ denote an operation and $\gamma^{(k)} = [\gamma_{O_p}^{(k)}, \gamma_{O_s}^{(k)}]$ the choice of operations in the $k$-th cell. The attack methods within $O_p^{(k)}$ are mutually exclusive, so $\gamma_{O_p}^{(k)}$ is a one-hot vector. In contrast, the operations within $O_s^{(k)}$ are continuous, and $\gamma_{O_s}^{(k)}$ is a normalized continuous vector whose elements represent selection probabilities. To unify the categorical choice of attack methods and the probabilities over step sizes, the output of $O^{(k)}$ is expressed as a mixture based on $\gamma_{O}^{(k)}$:

$$
\bar{O}\left(\gamma_{O}^{(k)}, \nabla_{\mathbf{x}^{(k-1)}}\right) = \sum_{o \in O^{(k)}} \gamma_{o} \cdot o\left(\nabla_{\mathbf{x}^{(k-1)}}\right) \tag{5}
$$

where $\gamma_{o}$ denotes the weight of the operation $o$ in $\gamma_{O}^{(k)}$. Correspondingly, the one-step attacker of the $k$-th cell can be expressed as the product of the two blocks:

$$
\bar{C}\left(\gamma^{(k)}, \nabla_{\mathbf{x}^{(k-1)}}\right) = \bar{O}_{s}\left(\gamma_{O_{s}}^{(k)}, \nabla_{\mathbf{x}^{(k-1)}}\right) \cdot \bar{O}_{p}\left(\gamma_{O_{p}}^{(k)}, \nabla_{\mathbf{x}^{(k-1)}}\right) \tag{6}
$$

Moreover, the constructed attacker is the composition of the attackers from each cell. In this way, our space covers common attackers. For example, as shown in Figure 1(b), $\mathrm{PGD}^K$ is obtained by selecting FGSM in each perturbation block; R+FGSM in Figure 1(c) corresponds to selecting Gaussian in the first cell, FGSM in the second cell, and Identity in the other cells.

Analysis of $\mathcal{A}$. Since there are 7 attack methods in $O_{p}$ at each step, there are $7^{K}$ combinations of attack methods in the $K$-step attacker space. This exponential growth rules out brute-force search. Moreover, the continuous step size block $O_{s}$ is also part of the attacker space. We therefore propose $\mathrm{A}^2$ to search for the best attacker in $\mathcal{A}$ and generate adversarial perturbations efficiently.

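As a concrete illustration of Equations (5) and (6), the sketch below implements one cell over a small subset of the operations; the operation set, dictionary layout, and tensor shapes are our assumptions for illustration only.

```python
import torch

# A subset of the perturbation block O_p, as callables on the input gradient.
perturb_ops = {
    "FGSM": lambda g: g.sign(),
    "FGM": lambda g: g / (g.flatten(1).norm(dim=1).view(-1, 1, 1, 1) + 1e-12),
    "Identity": lambda g: torch.zeros_like(g),  # lets the attacker stop early
}
# The step size block O_s, as multipliers of the base step size eta.
step_scales = torch.tensor([1e-4, 1e-3, 1e-2, 1e-1, 1.0])

def cell_output(grad, gamma_p, gamma_s, eta=8/255):
    """One-step attacker of a cell (Eq. 6): a one-hot mixture over O_p times
    a continuous mixture over O_s (both instances of Eq. 5)."""
    directions = torch.stack([op(grad) for op in perturb_ops.values()])
    direction = (gamma_p.view(-1, 1, 1, 1, 1) * directions).sum(dim=0)
    step = eta * (gamma_s * step_scales).sum()  # weighted sum of step sizes
    return step * direction
```
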
# 3.4 Automated Attacker $\mathbf{A}^2$

$\mathbf{A}^2$ constructs the best attacker against $(\mathbf{x}, y, f_{\theta})$, and its trainable parameters $\alpha$ include $W_{O_p}^{(k)}$, $W_{O_s}^{(k)}$, and $W_{q}^{(k)}$ for $k \in \{1, \dots, K\}$. As shown in Figure 1(a), we treat the current model and example as a query and the candidate operations as keys, so the attention mechanism can be used to score the operations within each block, and operations are then selected based on their scores. Specifically, in the $k$-th cell, we take the gradient of the last step, $\nabla_{\mathbf{x}^{(k-1)}} l\left(f_{\theta}\left(\mathbf{x}^{(k-1)}\right), y\right)$, as input and project it into a vector space as the query using $W_{q}^{(k)}$. Then, we use the trainable embedding table $W_{O}^{(k)}$ to convert the individual operations within $O^{(k)}$ into continuous keys. With the Scaled Dot-Product Attention [Vaswani et al., 2017], we compute the dot product of the query with each key as the score of operation $o \in O$ in the $k$-th cell:

$$
e_{o}^{(k)} = \left(\nabla_{\mathbf{x}^{(k-1)}} W_{q}^{(k)}\right)^{T} W_{o}^{(k)} \tag{7}
$$

Perturbation Block. The operations within $O_{p}^{(k)}$ are mutually exclusive, so we sample an operation with the normalized scores as probabilities, i.e., $\gamma_{O_p}^{(k)} \sim \mathrm{softmax}(e_{O_p}^{(k)})$.

Step Size Block. As the operations within $O_{s}^{(k)}$ are continuous values, we sum the individual step sizes with the normalized scores as weights. For $o_{s} \in O_{s}^{(k)}$, the weight is:

$$
\gamma_{o_{s}}^{(k)} = \frac{\exp\left(e_{o_{s}}^{(k)}\right)}{\sum_{o^{\prime} \in O_{s}^{(k)}} \exp\left(e_{o^{\prime}}^{(k)}\right)} \tag{8}
$$

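A small sketch of this scoring step follows, showing Equation (7) with a flattened gradient as the query; the class name, hidden dimension, and the use of a linear layer for $W_q$ are our assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CellScorer(nn.Module):
    """Scores the operations of one cell (Eq. 7): the last-step gradient is
    projected to a query by W_q and dotted with trainable operation keys W_O."""
    def __init__(self, grad_dim, n_ops, d=64):
        super().__init__()
        self.W_q = nn.Linear(grad_dim, d, bias=False)   # query projection
        self.W_O = nn.Parameter(torch.randn(n_ops, d))  # operation key table
    def forward(self, grad):
        q = self.W_q(grad.flatten(1))                   # [batch, d]
        return q @ self.W_O.t() / self.W_O.shape[1] ** 0.5  # [batch, n_ops]

# The step-size weights of Eq. (8) are then simply the softmax of the scores:
# gamma_s = F.softmax(scores, dim=-1)
```
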
# 3.5 Training of Automated Attacker

As mentioned above, we train $\mathrm{A}^2$ by gradient descent to minimize the following objective:

$$
\alpha^{*} = \underset{\alpha}{\arg\min}\; -\mathbb{E}_{(\mathbf{x}, y) \in D} \left[ l(f_{\theta}(\mathbf{x} + \delta_{\alpha}), y) \right] \tag{9}
$$

However, because the attacker is constructed by sampling in each perturbation block, the gradient of the loss w.r.t. $\gamma_{O_p}$ is zero. To train $\alpha$, we use the reparameterization trick [Kingma and Welling, 2013] to transfer the randomness of sampling to an auxiliary noise variable and reformulate the objective function. For brevity, we omit the step index $k$.

Let $\gamma_{O_p} = \phi(\kappa, e_{O_p})$ be a differentiable transformation, where $\kappa$ is an auxiliary noise variable with independent marginal $p(\kappa)$. In $\mathbf{A}^2$, we sample noise from the Gumbel distribution, i.e., $\kappa \sim \mathrm{Gumbel}(0)$ [Gumbel, 1954], and use Gumbel-Softmax [Jang et al., 2017] as $\phi$ to smoothly approximate the expectation of the loss [Maddison et al., 2014]. Specifically, $\phi(\kappa, e_{O_p}) = \mathrm{softmax}\left((e_{O_p} + \kappa) / \tau\right)$, where $\tau$ is the temperature. As $\tau \to 0$, the generated samples have the same distribution as $\text{one-hot}(\arg\max_{o_p \in O_p}(e_{o_p} + \kappa_{o_p}))$.

Using the reparameterization trick, we can form MC (Monte Carlo) estimates of the expectation of $\mathrm{A}^2$'s loss $l$ for each example, which are differentiable:

$$
\begin{aligned}
l\left(f_{\theta}(\mathbf{x} + \delta_{\alpha}), y\right)
&= \mathbb{E}_{\gamma_{O_p} \sim \mathrm{softmax}(e_{O_p})}\left[ l\left(f_{\theta}\left(\mathbf{x} + \bar{C}\left([\gamma_{O_p}, \gamma_{O_s}], \nabla_{\mathbf{x}}\right)\right), y\right) \right] \\
&= \mathbb{E}_{p(\kappa)}\left[ l\left(f_{\theta}\left(\mathbf{x} + \bar{C}\left([\phi(\kappa, e_{O_p}), \gamma_{O_s}], \nabla_{\mathbf{x}}\right)\right), y\right) \right] \\
&\approx \frac{1}{M} \sum_{m=1}^{M} l\left(f_{\theta}\left(\mathbf{x} + \bar{C}\left([\phi(\kappa^{(m)}, e_{O_p}), \gamma_{O_s}], \nabla_{\mathbf{x}}\right)\right), y\right)
\end{aligned} \tag{10}
$$

where $M$ is the number of samples; in practice, $M = 1$ already achieves good performance. In this way, we use the MC approximation of $l$ in Equation (10) as the objective function $\hat{l}$.

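A minimal sketch of this reparameterized sampling is shown below; it relies on PyTorch's built-in `gumbel_softmax`, which implements $\phi(\kappa, e) = \mathrm{softmax}((e + \kappa)/\tau)$, and the straight-through (`hard=True`) variant is our assumption for keeping the forward pass one-hot.

```python
import torch.nn.functional as F

def sample_gamma_p(e_p, tau=1.0):
    """Reparameterized draw of the perturbation-op choice (Sec. 3.5):
    Gumbel noise kappa is added to the scores e_p and softened by tau, so
    gradients flow to e_p (and hence alpha) while the sample stays one-hot."""
    return F.gumbel_softmax(e_p, tau=tau, hard=True, dim=-1)
```

With $M = 1$, a single such draw per example suffices to estimate Equation (10).
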
Moreover, training $\alpha$ to convergence in every epoch would be prohibitive due to the expensive inner maximization in Equation (4). We therefore use a simple one-step approximation scheme, following common practice [Finn et al., 2017, Liu et al., 2018]:

$$
\alpha^{*} \approx \alpha + \xi \nabla_{\alpha} \mathbb{E}_{(\mathbf{x}, y) \in D} \left[ \hat{l}(f_{\theta}(\mathbf{x} + \delta_{\alpha}), y) \right] \tag{11}
$$

where $\alpha$ denotes the current weights of the attacker and $\xi$ is the learning rate.

# 3.6 Framework of Adversarial Training with $\mathbf{A}^2$

The overall procedure is shown in Algorithm 1. As in standard adversarial training, for every batch we generate perturbations in $K$ steps and then update the model parameters. The key difference is Line 7: benefiting from a parameterized automated attacker, we tune the discrete attack methods and continuous step sizes to generate adversarial perturbations. After optimizing the model parameters, we use Equation (11) to update $\alpha$ as an approximation to $\alpha^{*}$. Since $\mathrm{A}^2$ focuses on the inner maximization, it is compatible with most adversarial training methods: for example, one can use the loss function of TRADES or MART for the outer minimization in Line 10 (i.e., TRADES-A$^2$ and MART-A$^2$), or include early stopping in Lines 5-9 as in FAT. A condensed sketch of the training loop follows the algorithm below.

Algorithm 1 Adversarial Training with Automated Attacker (AT-A²)
Input: Training examples $D$, perturbation bound $\epsilon$, the number of attack steps $K$
1: Initialize $\theta, \alpha$;
2: for epoch = 1, ..., $N_{ep}$ do
3: for minibatch $(\mathbf{X}, Y) \subset D$ do
4: $\mathbf{X}^{(0)} \gets \mathbf{X}$;
5: for $k = 1, \dots, K$ do
6: Calculate the gradient $\nabla_{\mathbf{X}^{(k-1)}}$;
7: Construct $\delta_{\alpha}^{(k)} \in \mathbb{S}$ according to $\nabla_{\mathbf{X}^{(k-1)}}$ by $g_{\alpha}$;
8: $\mathbf{X}^{(k)} = \mathbf{X}^{(k-1)} + \delta_{\alpha}^{(k)}$;
9: end for
10: Update $\theta$ with $\nabla_{\theta} \sum_{(\mathbf{x}, y)} l(f_{\theta}(\mathbf{x}^{(K)}), y)$;
11: Update $\alpha$ by Equation (11);
12: end for
13: end for

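The sketch below condenses Algorithm 1 in PyTorch style under our own naming: `attacker` stands for the parameterized $g_{\alpha}$ mapping a gradient to a step, and regenerating the perturbation for the $\alpha$ update (rather than reusing intermediate states) is a simplification of ours.

```python
import torch

def perturb(model, attacker, loss_fn, x, y, K=10, eps=8/255,
            create_graph=False):
    """Lines 4-9 of Algorithm 1: K cells, each turning the current input
    gradient into a step via the parameterized attacker g_alpha."""
    x_adv = x.clone().requires_grad_(True)
    for _ in range(K):
        grad = torch.autograd.grad(loss_fn(model(x_adv), y), x_adv,
                                   create_graph=create_graph)[0]
        x_adv = (x_adv + attacker(grad)).clamp(x - eps, x + eps)
        if not create_graph:          # defense phase: cut the graph per step
            x_adv = x_adv.detach().requires_grad_(True)
    return x_adv

def train_step(model, attacker, loss_fn, opt_theta, opt_alpha, x, y):
    """One minibatch of AT-A^2: update theta (Line 10), then alpha (Line 11)."""
    x_adv = perturb(model, attacker, loss_fn, x, y).detach()
    loss = loss_fn(model(x_adv), y)            # outer minimization over theta
    opt_theta.zero_grad(); loss.backward(); opt_theta.step()
    x_adv = perturb(model, attacker, loss_fn, x, y, create_graph=True)
    attack_loss = -loss_fn(model(x_adv), y)    # ascend the model loss (Eq. 11)
    opt_alpha.zero_grad(); attack_loss.backward(); opt_alpha.step()
```
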
# 4 Experiments

We conduct extensive experiments on public datasets to answer the following questions: 1) Can $\mathbf{A}^2$ generate stronger adversarial perturbations? 2) How effective is adversarial training with $\mathbf{A}^2$? 3) Is $\mathbf{A}^2$ robust to its hyperparameters? All experiments are run on GeForce RTX 3090 (GPU) and Intel(R) Xeon(R) Silver 4210 (CPU) instances.

# 4.1 Effectiveness of Automated Attacker (RQ1)

In this part, we fix the model $f_{\theta}$, train the automated attacker alone, and investigate whether $\mathrm{A}^2$ generates more powerful perturbations than the commonly used PGD.

Figure 2: Effect of the adversarial perturbations generated by $\mathbf{A}^2$ over training epochs. (a) ResNet-18 trained by MART; (b) WRN-34-10 trained by TRADES-AWP; (c) WRN-34-10 trained by MART-AWP; (d) WRN-28-10 trained by RST-AWP.

Experimental Settings. To demonstrate generality, we choose different neural architectures (i.e., ResNet-18, WRN-34-10, and WRN-28-10) trained on CIFAR-10 by various AT methods: TRADES, MART, RST, and AWP. All trained models are open-source checkpoints. We choose $\mathrm{PGD}^K$ with a random start $\delta^{(0)} \sim \text{Uniform}(-\epsilon, \epsilon)$ as the baseline. All attacks are $L_{\infty}$-bounded with a total perturbation scale of $\epsilon = 8/255$. Since more attack steps generally improve the attack effect, we compare the automated attacker with PGD at different step counts. Moreover, we try different step size blocks, i.e., $\eta$, in $\mathbf{A}^2$: 1) $\eta = 2/255$, the same as the step size setting in PGD; 2) $\eta = 8/255$, denoted $\mathbf{A}^{2}_{\dagger}$, which allows $\mathbf{A}^2$ to search the whole $\epsilon$ bound at each step. $\mathbf{A}^2$ is trained using Adam [Kingma and Ba, 2015] with learning rate $10^{-3}$, weight decay $10^{-2}$, and the other hyperparameters at their defaults.

Table 1: Comparison of attack effects (%, lower is better) of multi-step PGD and $\mathbf{A}^2$ on robust models. We run each method 5 times and report the average; standard deviations are omitted as they are very small. The architecture of all defense models is WideResNet, except for MART, whose architecture is ResNet-18.

<table><tr><td rowspan="2">Defense</td><td rowspan="2">Natural</td><td colspan="3">10-step</td><td colspan="3">20-step</td><td rowspan="2">\( PGD^{100} \)</td></tr><tr><td>PGD</td><td>\( A^2 \)</td><td>\( A^{2}_{\dagger} \)</td><td>PGD</td><td>\( A^2 \)</td><td>\( A^{2}_{\dagger} \)</td></tr><tr><td>\( MART^0 \)</td><td>83.07</td><td>54.78</td><td>54.09</td><td>53.65</td><td>53.76</td><td>53.52</td><td>53.24</td><td>53.28</td></tr><tr><td>\( TRADES-AWP^1 \)</td><td>85.36</td><td>60.22</td><td>59.67</td><td>59.60</td><td>59.64</td><td>59.38</td><td>59.34</td><td>59.49</td></tr><tr><td>\( MART-AWP^1 \)</td><td>85.60</td><td>60.38</td><td>59.76</td><td>59.51</td><td>59.52</td><td>59.42</td><td>59.25</td><td>59.29</td></tr><tr><td>\( RST-AWP^1 \)</td><td>88.25</td><td>64.68</td><td>64.27</td><td>64.17</td><td>64.14</td><td>64.02</td><td>63.97</td><td>64.03</td></tr></table>

Attack Effect. The training process of $\mathrm{A}^2$ is shown in Figure 2. We plot the attack effect over the first 20 epochs, compare it with PGD, and observe whether $\mathrm{A}^2$ converges. In the early training stage, the random combinations of attack operations are much less effective. After 10~20 epochs, the generated attacks become stable and effective. In practical automated adversarial training, the model and the automated attacker are trained iteratively, and the fast convergence of $\mathrm{A}^2$ ensures that the generated perturbations are strong enough. In addition, a larger $\eta$ achieves better attacks; however, as the number of steps increases, the marginal effect diminishes and training may fluctuate.

Table 1 reports the attack effects of PGD and $\mathrm{A}^2$ at different step counts. For the same number of steps $K \in \{10, 20\}$, $\mathrm{A}^2$ stably outperforms PGD, and $\mathrm{A}^{2}_{\dagger}$ constructs even stronger attacks in the expanded search space. As the number of steps grows, $\mathrm{A}^2$ becomes more effective thanks to the combination of attack methods and automated step size tuning. Due to the diminishing marginal effect, $\mathrm{PGD}^{100}$ vs. $\mathrm{PGD}^{20}$ achieves less improvement than $\mathrm{PGD}^{20}$ vs. $\mathrm{PGD}^{10}$. At $1/5$ of the cost, the 20-step $\mathrm{A}^{2}_{\dagger}$ with the optimized $\alpha$ finds better attacks than $\mathrm{PGD}^{100}$. In summary, Table 1 verifies that $\mathrm{A}^2$ stably outperforms PGD at the same step count, and obtains better attacks than PGD, whose step size and attack method are fixed, at a significantly lower cost.

Overhead Analysis. The overhead of $\mathrm{A}^2$ is not significant compared to PGD; both methods are close in wall-clock time. For WRN-34, PGD takes 19.75/147.09/287.76 seconds to generate 1/10/20-step attacks, respectively, showing that more inner steps lead to a linear increase in time. Meanwhile, $\mathrm{A}^2$ takes 157.61/302.51 seconds to generate 10/20-step attacks, respectively. The main overhead remains in the forward computation and backward propagation of the defense model. Moreover, Section A.3 in the Appendix shows that the total parameter size of $\mathrm{A}^2$ is also acceptable.

# 4.2 Effectiveness of Adversarial Training with $\mathbf{A}^2$ (RQ2)

In this part, we evaluate the robustness of our proposed AT-A$^2$ on different datasets against white-box and ensemble attacks. To verify that the stronger attacks generated by A$^2$ on-the-fly during training improve robustness, we consider various adversarial training methods (i.e., AT, TRADES, MART, and AWP) without additional data across different datasets.

Benchmark. We conduct experiments on the baseline AT and the SOTA AWP with $\mathrm{A}^2$ across three benchmark datasets to verify the generality of $\mathrm{A}^2$. We follow the settings in AWP: PreActResNet-18 trained for 200 epochs, $\epsilon = 8/255$, and $\gamma = 10^{-2}$ for AWP. The step size is 1/255 for SVHN and 2/255 for CIFAR-10 and CIFAR-100. For AT and AWP, the attacker used in training is $\mathrm{PGD}^{10}$. The 10-step $\mathrm{A}^2$ is trained with the same settings as in RQ1. $\mathrm{PGD}^{20}$ is used for testing, and the test robustness is reported in Table 2. It shows that $\mathrm{A}^2$, as a component focusing on the inner maximization, achieves better results on most datasets. Moreover, $\mathbf{A}^2$ is generic and boosts the robustness of both the baseline and the SOTA AT methods.

Table 2: Test robustness (%, higher is better) using PreActResNet-18 under the $L_{\infty}$ threat model ("Best" is the highest robustness over training; "Last" is the robustness at the last epoch). Standard deviations over 5 runs are omitted as they are very small.

<table><tr><td rowspan="2">Defense</td><td colspan="2">SVHN</td><td colspan="2">CIFAR-10</td><td colspan="2">CIFAR-100</td></tr><tr><td>Best</td><td>Last</td><td>Best</td><td>Last</td><td>Best</td><td>Last</td></tr><tr><td>AT</td><td>53.36</td><td>44.49</td><td>52.79</td><td>44.44</td><td>27.22</td><td>20.82</td></tr><tr><td>AT-A2</td><td>56.76</td><td>44.75</td><td>52.96</td><td>44.59</td><td>28.14</td><td>20.28</td></tr><tr><td>AWP</td><td>59.12</td><td>55.87</td><td>55.39</td><td>54.73</td><td>30.71</td><td>30.28</td></tr><tr><td>AWP-A2</td><td>61.42</td><td>58.45</td><td>55.71</td><td>55.31</td><td>31.36</td><td>30.73</td></tr></table>

Robustness on WideResNet. Furthermore, we train WRN-34-10 on CIFAR-10 with various AT methods (i.e., AT, TRADES, MART, and AWP) following their original papers and open-source codes<sup>2</sup>. All defense models are trained using SGD with momentum 0.9, weight decay $5 \times 10^{-4}$, and an initial learning rate of 0.1, divided by 10 at 50% and 75% of the training epochs. Except for AWP, which uses 200 epochs, the AT methods train the model for 120 epochs. Simple data augmentations (i.e., $32 \times 32$ random crop with 4-pixel padding and random horizontal flip) are applied.

For white-box attacks, we test FGSM, $\mathrm{PGD}^{20}$, and $\mathrm{CW}_{\infty}$ [Carlini and Wagner, 2017]. In addition, we test robustness against the standard AutoAttack [Croce and Hein, 2020], a strong and reliable ensemble of diverse parameter-free attacks (three white-box attackers and a black-box attacker). Table 3 shows that $\mathrm{A}^2$ reliably boosts AT variants against white-box and ensemble attacks, verifying that $\mathrm{A}^2$ is general for AT and improves adversarial robustness genuinely, rather than through gradient obfuscation or masking.

Table 3: Test robustness (%, higher is better) on CIFAR-10 using WRN-34-10 under the $L_{\infty}$ threat model ("Natural" denotes the accuracy on natural examples; the other columns report the accuracy on adversarial examples generated by different attacks). Standard deviations over 5 runs are omitted as they are very small.

<table><tr><td>Defense</td><td>Natural</td><td>FGSM</td><td>PGD20</td><td>CW∞</td><td>AutoAttack</td></tr><tr><td>AT</td><td>87.30</td><td>56.10</td><td>52.68</td><td>50.73</td><td>47.04</td></tr><tr><td>AT-A2</td><td>84.54</td><td>63.72</td><td>54.68</td><td>51.17</td><td>48.36</td></tr><tr><td>TRADES</td><td>84.65</td><td>61.32</td><td>56.33</td><td>54.20</td><td>53.08</td></tr><tr><td>TRADES-A2</td><td>85.54</td><td>65.93</td><td>59.84</td><td>56.61</td><td>55.03</td></tr><tr><td>MART</td><td>84.17</td><td>61.61</td><td>57.88</td><td>54.58</td><td>51.10</td></tr><tr><td>MART-A2</td><td>84.53</td><td>63.73</td><td>59.57</td><td>54.66</td><td>52.38</td></tr><tr><td>AWP</td><td>85.57</td><td>62.90</td><td>58.14</td><td>55.96</td><td>54.04</td></tr><tr><td>AWP-A2</td><td>87.54</td><td>64.70</td><td>59.50</td><td>57.42</td><td>54.86</td></tr></table>

Additionally, on natural examples, AT performs better than AT-A$^2$. The main reason is that A$^2$ generates stronger perturbations for better robustness, which decreases natural accuracy (i.e., 84.54). Many works (e.g., TRADES, MART, and AWP) use regularization to achieve a trade-off between robustness and accuracy; this regularization is also used when optimizing the automated attacker, so for the other AT methods in Table 3, combining A$^2$ can achieve higher accuracy. Moreover, for WRN-34, the training time of AWP-A$^2$ is 970 s/epoch versus 920 s/epoch for AWP, so the additional overhead of A$^2$ is not significant.

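For completeness, the snippet below sketches how such robustness numbers are typically computed: run a fixed attack on each test batch and measure the surviving accuracy. The `attack` callable is an assumption (e.g., the `pgd_attack` sketch from Section 2 with `steps=20`).

```python
import torch

def robust_accuracy(model, loader, attack):
    """Accuracy on adversarial examples: the fraction of test inputs still
    classified correctly after the given attack (evaluation only)."""
    model.eval()
    correct, total = 0, 0
    for x, y in loader:
        x_adv = attack(x, y)                    # attack computes its own grads
        with torch.no_grad():
            pred = model(x_adv).argmax(dim=1)
        correct += (pred == y).sum().item()
        total += y.numel()
    return 100.0 * correct / total              # percentage, as in Tables 2-3
```
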
# 4.3 Hyperparameters of $\mathbf{A}^2$ (RQ3)

The hyperparameters of $\mathrm{A}^2$ comprise the training hyperparameters and the design of the attacker space. Table 4 compares the attack effect under different hyperparameters. Overall, $\mathrm{A}^2$ is robust to its hyperparameters: it consistently outperforms $\mathrm{PGD}^{10}$ and performs close to $\mathrm{PGD}^{20}$.

Table 4: Comparison of attack effects (%, lower is better) of 10-step $\mathrm{A}^2$ with different hyperparameters ($\mathrm{A}^2_{p,q}$ is short for the 10-step $\mathrm{A}^2$ trained with learning rate $\xi = 10^{-p}$ and step size block $\eta = q/255$). Standard deviations over 5 runs are omitted as they are very small.

<table><tr><td>Attack</td><td>TRADES-AWP1</td><td>MART-AWP1</td><td>RST-AWP1</td></tr><tr><td>PGD10</td><td>60.22</td><td>60.38</td><td>64.68</td></tr><tr><td>PGD20</td><td>59.64</td><td>59.52</td><td>64.14</td></tr><tr><td>A23,2</td><td>59.67</td><td>59.76</td><td>64.27</td></tr><tr><td>A22,2</td><td>59.93</td><td>59.87</td><td>64.34</td></tr><tr><td>A24,2</td><td>59.76</td><td>59.78</td><td>64.29</td></tr><tr><td>A23,5</td><td>59.49</td><td>59.62</td><td>64.11</td></tr><tr><td>A23,8</td><td>59.53</td><td>59.53</td><td>64.17</td></tr></table>

Training of $\mathbf{A}^2$. The effect of attacks with different learning rates $\xi$ is shown in the middle rows of Table 4. Although Adam uses a dynamic learning rate, an excessive initial learning rate (i.e., $10^{-2}$) leads to sub-optimal attacks.

Attacker Space. The influence of the attack step $K$ was investigated in RQ1. As shown in the last two rows of Table 4, a larger step size bound $\eta$ increases the effectiveness of $\mathrm{A}^2$. However, as the training curves in Figure 2 show, a larger $\eta$ introduces instability into the training of the attacker.

# 5 Related Work

# 5.1 Adversarial Learning

Many recent works [Goodfellow et al., 2015, Carlini and Wagner, 2017, Croce and Hein, 2020] have shown that DNNs are vulnerable to adversarial examples, and various defense strategies and models have been proposed to deal with this threat. However, as shown in C&W [Carlini and Wagner, 2017], many works mistake gradient obfuscation or masking for adversarial robustness. AT [Madry et al., 2018] formulates a class of adversarial training methods as a saddle point problem (i.e., Equation (1)) and improves robustness reliably.

Based on AT, many works [Zhang et al., 2019, Wang et al., 2019, Wu et al., 2020] focusing on components of the outer minimization have been introduced to further enhance performance. The inner maximization is also the goal of adversarial attacks, where $l$ is the 0-1 loss; many attacks, e.g., FGSM [Goodfellow et al., 2015], C&W [Carlini and Wagner, 2017], and AutoAttack [Croce and Hein, 2020], have been proposed to attack DNNs and have facilitated the development of adversarial training.

# 5.2 Automated Machine Learning

AutoML [Bergstra et al., 2011, Zoph and Le, 2017, Liu et al., 2018, Cubuk et al., 2019] aims to automate the parts of the machine learning pipeline that otherwise require expert solutions. For a particular domain, it is common practice to define a large search space of parameters and configurations based on expert experience and to search for optimal solutions with methods such as black-box optimization, using a suitable metric to evaluate each candidate. The same idea applies to adversarial learning: $A^3$ [Yao et al., 2021], which is also closely related to AutoML, automatically discovers an effective attacker for a given model.

# 6 Conclusion

In this work, we proposed $\mathrm{A}^2$, to the best of our knowledge the first adversarial training method that focuses on automated perturbation generation. In $\mathrm{A}^2$, the attacker space is designed by summarizing existing perturbation methods, and the parameterized automated attacker leverages the attention mechanism to choose the discrete attack method and the continuous step size and then generates adversarial perturbations. During training, a one-step approximation of the optimal automated attacker is used to generate optimal perturbations on-the-fly for the model. The experimental results show that $\mathrm{A}^2$ generates stronger attacks with low extra cost and reliably boosts the robustness of various AT methods.

For future work, we plan to add the target loss of the inner maximization to the attacker space. We also plan to apply $A^2$ to enhance adversarial training for Natural Language Processing.

# Acknowledgments and Disclosure of Funding

This work was supported by the National Natural Science Foundation of China (#62102177 and #U1811461), the Natural Science Foundation of Jiangsu Province (#BK20210181), the Key Research and Development Program of Jiangsu Province (#BE2021729), Ant Group through the Ant Research Program, and the Collaborative Innovation Center of Novel Software Technology and Industrialization, Jiangsu, China.

# References

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proc. of CVPR, 2016.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proc. of NAACL-HLT, 2019.
Huifeng Guo, Ruiming Tang, Yunming Ye, Zhenguo Li, and Xiuqiang He. DeepFM: A factorization-machine based neural network for CTR prediction. In Proc. of IJCAI, 2017.
Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. In Proc. of BMVC, 2016.
Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. Theoretically principled trade-off between robustness and accuracy. In Proc. of ICML, 2019.
Yisen Wang, Difan Zou, Jinfeng Yi, James Bailey, Xingjun Ma, and Quanquan Gu. Improving adversarial robustness requires revisiting misclassified examples. In Proc. of ICLR, 2019.
Dongxian Wu, Shu-Tao Xia, and Yisen Wang. Adversarial weight perturbation helps robust generalization. In Proc. of NeurIPS, 2020.
Yair Carmon, Aditi Raghunathan, Ludwig Schmidt, Percy Liang, and John C Duchi. Unlabeled data improves adversarial robustness. In Proc. of NeurIPS, 2019.
Sven Gowal, Chongli Qin, Jonathan Uesato, Timothy Mann, and Pushmeet Kohli. Uncovering the limits of adversarial training against norm-bounded adversarial examples. arXiv preprint arXiv:2010.03593, 2020.
Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In Proc. of ICLR, 2018.
Ian Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In Proc. of ICLR, 2015.
Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In Proc. of ICML, 2020.
Chengyuan Yao, Pavol Bielik, Petar Tsankov, and Martin Vechev. Automated discovery of adaptive attacks on adversarial defenses. arXiv preprint arXiv:2102.11860, 2021.
Eric Wong, Leslie Rice, and J Zico Kolter. Fast is better than free: Revisiting adversarial training. In Proc. of ICLR, 2019.
Barret Zoph and Quoc V. Le. Neural architecture search with reinforcement learning. In Proc. of ICLR, 2017.
Hanxiao Liu, Karen Simonyan, and Yiming Yang. DARTS: Differentiable architecture search. In Proc. of ICLR, 2018.
Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-Softmax. In Proc. of ICLR, 2017.
Takeru Miyato, Andrew M Dai, and Ian Goodfellow. Adversarial training methods for semi-supervised text classification. In Proc. of ICLR, 2017.
Jingfeng Zhang, Xilie Xu, Bo Han, Gang Niu, Lizhen Cui, Masashi Sugiyama, and Mohan Kankanhalli. Attacks which do not kill training make adversarial learning stronger. In Proc. of ICML, 2020.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Proc. of NeurIPS, 2017.
Diederik P Kingma and Max Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.
Emil Julius Gumbel. Statistical theory of extreme values and some practical applications: A series of lectures. US Government Printing Office, 1954.
Chris J Maddison, Daniel Tarlow, and Tom Minka. A* sampling. In Proc. of NeurIPS, 2014.
Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proc. of ICML, 2017.
Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proc. of ICLR, 2015.
Nicholas Carlini and David Wagner. Towards evaluating the robustness of neural networks. In 2017 IEEE Symposium on Security and Privacy (SP), 2017.
James Bergstra, Rémi Bardenet, Yoshua Bengio, and Balázs Kégl. Algorithms for hyper-parameter optimization. In Proc. of NeurIPS, 2011.
Ekin D Cubuk, Barret Zoph, Dandelion Mane, Vijay Vasudevan, and Quoc V Le. AutoAugment: Learning augmentation strategies from data. In Proc. of CVPR, 2019.

# Checklist

1. For all authors...
   (a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
   (b) Did you describe the limitations of your work? [Yes]
   (c) Did you discuss any potential negative societal impacts of your work? [No]
   (d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
2. If you are including theoretical results...
   (a) Did you state the full set of assumptions of all theoretical results? [N/A]
   (b) Did you include complete proofs of all theoretical results? [N/A]
3. If you ran experiments...
   (a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
   (b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
   (c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] The standard deviations are omitted as they are very small.
   (d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] See Appendix.
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
   (a) If your work uses existing assets, did you cite the creators? [Yes]
   (b) Did you mention the license of the assets? [Yes]
   (c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
   (d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
   (e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
5. If you used crowdsourcing or conducted research with human subjects...
   (a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
   (b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
   (c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]

a2efficientautomatedattackerforboostingadversarialtraining/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ddc1b2b7de2badf8c29852f189268702dfd41387ebb5c395f2e3c0c9fdc718c9
+size 354672

a2efficientautomatedattackerforboostingadversarialtraining/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:80643f2ceb4991704f7e588aa0d512f8f5fee8e4da7c2e31c6dfb7d8625e071f
+size 520405

acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dab14dbc02d12eeb63078f648db8417cdde05fd1c8b5526dc8f554c3c85de5d7
+size 101887

acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f7702d89b93b9cdb976926d44007dfa2b50fcbeb241b65dfc2e9ed907f58bfb
+size 129987

acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/7ac12903-1e07-48f3-9786-82b906bc92f3_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e57627369b8bc559287b81d4f39b642a4c7e4e1608a104a186212d209b6f4c1a
+size 1500419
acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/full.md
ADDED
@@ -0,0 +1,455 @@
# Accelerated Linearized Laplace Approximation for Bayesian Deep Learning

Zhijie Deng$^{1,2}$, Feng Zhou$^{2,3}$, Jun Zhu$^{2,4}$*

$^{1}$ Qing Yuan Research Institute, Shanghai Jiao Tong University
$^{2}$ Dept. of Comp. Sci. & Tech., BNRist Center, THU-Bosch Joint ML Center, Tsinghua University
$^{3}$ Center for Applied Statistics, School of Statistics, Renmin University of China
$^{4}$ Pazhou Laboratory (Huangpu), Guangzhou, China

zhijied@sjtu.edu.cn, feng.zhou@ruc.edu.cn, dcszj@tsinghua.edu.cn

# Abstract

Laplace approximation (LA) and its linearized variant (LLA) enable effortless adaptation of pretrained deep neural networks to Bayesian neural networks. The generalized Gauss-Newton (GGN) approximation is typically introduced to improve their tractability. However, LA and LLA are still confronted with non-trivial inefficiency issues and must rely on Kronecker-factored, diagonal, or even last-layer approximate GGN matrices in practical use. These approximations are likely to harm the fidelity of learning outcomes. To tackle this issue, inspired by the connections between LLA and neural tangent kernels (NTKs), we develop a Nyström approximation to NTKs to accelerate LLA. Our method benefits from the capability of popular deep learning libraries for forward-mode automatic differentiation, and enjoys reassuring theoretical guarantees. Extensive studies reflect the merits of the proposed method in aspects of both scalability and performance. Our method can even scale up to architectures like vision transformers. We also offer valuable ablation studies to diagnose our method. Code is available at https://github.com/thudzj/ELLA.

# 1 Introduction

Deep neural networks (DNNs) excel at modeling deterministic relationships and have become de facto solutions for diverse pattern recognition problems [18, 54]. However, DNNs fall short in reasoning about model uncertainty [1] and suffer from poor calibration [17]. These issues are intolerable in risk-sensitive scenarios like self-driving [27], healthcare [34], finance [25], etc.

Bayesian neural networks (BNNs) have emerged as effective prescriptions for these pathologies [37, 21, 44, 16]. They usually proceed by estimating the posteriors over high-dimensional NN parameters. Due to some intractable integrals, diverse approximate inference methods have been applied to learning BNNs, spanning variational inference (VI) [1, 36], Markov chain Monte Carlo (MCMC) [55, 4], Laplace approximation (LA) [38, 50, 30], etc.

LA has recently gained unprecedented attention because its post-hoc nature nicely suits the pretraining-finetuning fashion in deep learning (DL). LA approximates the posterior with a Gaussian around its maximum, whose mean and covariance are the maximum a posteriori (MAP) solution and the inverse of the Hessian respectively. It is a common practice to approximate the Hessian with the generalized Gauss-Newton (GGN) matrix [40] to make the whole workflow more tractable.

Figure 1 (panels: ELLA, LLA, LLA-KFAC, LLA-Diag, LLA*): 1-D regression on $y = \sin 2x + \epsilon$, $\epsilon \sim \mathcal{N}(0,0.2)$. Red dots, central blue curves, and shaded regions refer to the training data, mean predictions, and uncertainty respectively. The model is a pretrained multilayer perceptron (MLP) with 3 hidden layers. As shown, the predictive uncertainty of ELLA is on par with or better than that of competitors such as LLA with KFAC approximation (LLA-KFAC), LLA with diagonal approximation (LLA-Diag), and last-layer LLA (LLA*).

Linearized LA (LLA) [14, 22] applies LA to the first-order approximation of the NN of concern. Immer et al. [22] argue that LLA is more sensible than LA in the presence of the GGN approximation; LLA can perform on par with or better than popular alternatives on various uncertainty quantification (UQ) tasks [14, 6, 7]. The Laplace library [6] further substantially advances LLA's applicability, making it a simple and competitive baseline for Bayesian DL.

The practical adoption of LA and LLA actually entails further approximations on top of GGN. E.g., when using Laplace to process a pretrained ResNet [18], practitioners are recommended to resort to a Kronecker-factored (KFAC) [40] or diagonal approximation of the full GGN matrix for tractability. An orthogonal tactic is to apply LA/LLA to only NNs' last layer [30]. Yet, the approximation errors in these cases can hardly be identified, significantly undermining the fidelity of the learning outcomes.

This paper aims at scaling LLA up to make probabilistic predictions in a more assurable way. We first revisit the inherent connections between neural tangent kernels (NTKs) [24] and LLA [28, 22], and find that, if we can approximate the NTKs with the inner product of some low-dimensional vector representations of the data, LLA can be considerably accelerated. Given this finding, we propose to adapt the Nyström method to approximate the NTKs of multi-output NNs, and advocate leveraging forward-mode automatic differentiation (fwAD) to efficiently compute the involved Jacobian-vector products (JVPs). The resultant accElerated LLA (ELLA) preserves the principal structures of vanilla LLA yet without explicitly computing/storing the costly GGN/Jacobian matrices for the training data. What's more, we theoretically analyze the approximation error between the predictive of ELLA and that of vanilla LLA, and find that it decreases rapidly as the Nyström approximation becomes more accurate.

We perform extensive studies to show that ELLA can be a low-cost and effective baseline for Bayesian DL. We first describe how to specify the hyperparameters of ELLA, and use an illustrative regression task to demonstrate the effectiveness of ELLA (see Figure 1). We then experiment on standard image classification benchmarks to exhibit the superiority of ELLA over competing baselines in aspects of both performance and scalability. We further show that ELLA can even scale up to modern architectures like vision transformers (ViTs) [11].

# 2 Background

Consider a learning problem on $\mathcal{D} = (\mathbf{X},\mathbf{Y}) = \{(\pmb{x}_i,\pmb{y}_i)\}_{i = 1}^N$, where $\pmb{x}_{i}\in \mathcal{X}$ and $\pmb{y}_{i}\in \mathbb{R}^{C}$ (e.g., regression) or $\{0,1\}^C$ (e.g., classification) refer to observations and targets respectively. The advance of machine learning suggests using an NN $g_{\pmb{\theta}}(\cdot):\mathcal{X}\to \mathbb{R}^{C}$ with parameters $\pmb{\theta}\in \mathbb{R}^{P}$ for data fitting. Despite performing well, regularly trained NNs capture only the most likely interpretation of the data, and thus lack the ability to reason about uncertainty and are prone to overfitting and overconfidence.

BNNs [37, 21, 44] characterize model uncertainty by the probabilistic principle and can holistically represent all likely interpretations. Typically, BNNs impose a prior $p(\pmb{\theta})$ on NN parameters and chase the Bayesian posterior $p(\pmb{\theta}|\mathcal{D}) = p(\mathcal{D}|\pmb{\theta})p(\pmb{\theta}) / p(\mathcal{D})$, where $p(\mathcal{D}|\pmb{\theta}) = \prod_{i}p(\pmb{y}_{i}|\pmb{x}_{i},\pmb{\theta}) = \prod_{i}p(\pmb{y}_{i}|g_{\pmb{\theta}}(\pmb{x}_{i}))$. Analytical estimation is usually intractable due to NNs' high nonlinearity. Thereby, BNN methods usually find a surrogate of the true posterior $q(\pmb{\theta}) \approx p(\pmb{\theta}|\mathcal{D})$ via approximate inference methods like variational inference (VI) [1, 20, 36, 62, 29], Laplace approximation (LA) [38, 50], Markov chain Monte Carlo (MCMC) [55, 4, 63], particle-optimization based variational inference (POVI) [35], etc.

BNNs predict for new data $\pmb{x}_{*}$ by the posterior predictive $p(\pmb{y}|\pmb{x}_{*},\mathcal{D}) = \mathbb{E}_{p(\pmb{\theta}|\mathcal{D})}p(\pmb{y}|\pmb{x}_{*},\pmb{\theta}) \approx \mathbb{E}_{q(\pmb{\theta})}p(\pmb{y}|\pmb{x}_{*},\pmb{\theta}) \approx \frac{1}{S}\sum_{s=1}^{S}p(\pmb{y}|g_{\pmb{\theta}_s}(\pmb{x}_{*}))$, where $\pmb{\theta}_s \sim q(\pmb{\theta})$ are i.i.d. Monte Carlo (MC) samples.

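As a minimal sketch of this MC averaging step for classification (assuming hypothetical `sample_params` and `forward` helpers that draw $\pmb{\theta}_s \sim q(\pmb{\theta})$ and evaluate $g_{\pmb{\theta}}$, respectively):

```python
import torch

def mc_posterior_predictive(forward, sample_params, x, S=20):
    """Average the predicted class probabilities over S samples from q(theta).

    forward(theta, x) -> logits of shape (C,); sample_params() -> theta.
    Both callables are assumed to be supplied by the surrounding BNN code.
    """
    probs = torch.stack([
        torch.softmax(forward(sample_params(), x), dim=-1)  # p(y | g_theta(x))
        for _ in range(S)
    ])
    return probs.mean(dim=0)  # approximates E_{q(theta)} p(y | x, theta)
```
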
# 2.1 Laplace Approximation and Its Linearized Variant

Typically, LA builds a Gaussian approximate posterior of the form $q(\pmb{\theta}) = \mathcal{N}(\pmb{\theta};\hat{\pmb{\theta}},\pmb{\Sigma})$, where $\hat{\pmb{\theta}}$ denotes the MAP solution, i.e., $\hat{\pmb{\theta}} = \arg \max_{\pmb{\theta}}\log p(\mathcal{D}|\pmb{\theta}) + \log p(\pmb{\theta})$, and $\pmb{\Sigma}$ is the inverse of the Hessian of the negative log posterior w.r.t. the parameters, i.e., $\pmb{\Sigma}^{-1} = -\nabla_{\pmb{\theta}\pmb{\theta}}^{2}(\log p(\mathcal{D}|\pmb{\theta}) + \log p(\pmb{\theta}))|_{\pmb{\theta} = \hat{\pmb{\theta}}}$. Without loss of generality, we base the following discussion on the isotropic Gaussian prior $p(\pmb{\theta}) = \mathcal{N}(\pmb{\theta};\mathbf{0},\sigma_0^2\mathbf{I}_P)$<sup>1</sup>, so $-\nabla_{\pmb{\theta}\pmb{\theta}}^{2}\log p(\pmb{\theta})|_{\pmb{\theta} = \hat{\pmb{\theta}}}$ equals $\mathbf{I}_P / \sigma_0^2$.

Due to the intractability of the Hessian for NNs with massive parameters, it is a common practice to use the symmetric positive semi-definite (SPSD) GGN matrix as a workaround, i.e.,

$$
\pmb{\Sigma}^{-1} = \sum_{i} J_{\hat{\pmb{\theta}}}(\pmb{x}_{i})^{\top} \Lambda(\pmb{x}_{i}, \pmb{y}_{i}) J_{\hat{\pmb{\theta}}}(\pmb{x}_{i}) + \mathbf{I}_{P} / \sigma_{0}^{2}, \tag{1}
$$

where $J_{\hat{\pmb{\theta}}}(\pmb{x})\triangleq \nabla_{\pmb{\theta}}g_{\pmb{\theta}}(\pmb{x})|_{\pmb{\theta} = \hat{\pmb{\theta}}}$ and $\Lambda(\pmb{x},\pmb{y})\triangleq -\nabla_{\pmb{g}\pmb{g}}^{2}\log p(\pmb{y}|\pmb{g})|_{\pmb{g} = g_{\hat{\pmb{\theta}}}(\pmb{x})}$.

When concatenating $\{J_{\hat{\pmb{\theta}}}(\pmb{x}_i)\in \mathbb{R}^{C\times P}\}_{i = 1}^N$ into a big matrix $\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\in \mathbb{R}^{NC\times P}$ and organizing $\{\Lambda(\pmb{x}_i,\pmb{y}_i)\in \mathbb{R}^{C\times C}\}_{i = 1}^N$ as a block-diagonal matrix $\Lambda_{\mathbf{X},\mathbf{Y}}\in \mathbb{R}^{NC\times NC}$, we have:

$$
\pmb{\Sigma} = \left[\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} \Lambda_{\mathbf{X},\mathbf{Y}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} + \mathbf{I}_{P} / \sigma_{0}^{2}\right]^{-1}. \tag{2}
$$

Yet, LA suffers from underfitting [33]. This is probably because the GGN approximation implicitly turns the original model $g_{\pmb{\theta}}(\pmb{x})$ into a linear one, $g_{\pmb{\theta}}^{\mathrm{lin}}(\pmb{x}) = g_{\hat{\pmb{\theta}}}(\pmb{x}) + J_{\hat{\pmb{\theta}}}(\pmb{x})(\pmb{\theta} - \hat{\pmb{\theta}})$, yet $g_{\pmb{\theta}}(\pmb{x})$ is still leveraged to make predictions. I.e., there exists a shift between posterior inference and prediction [22]. To mitigate this issue, a proposal is to predict with $g_{\pmb{\theta}}^{\mathrm{lin}}(\pmb{x})$, giving rise to linearized LA (LLA) [14, 28, 22].

LA and LLA nicely fit the pretraining-finetuning fashion in DL: they can be applied post hoc to pretrained models, where only the GGN matrices need to be estimated and then inverted. With LA and LLA, practitioners can easily adapt off-the-shelf high-performing DNNs to BNNs.

LLA has revealed strong results on diverse UQ problems [14, 6, 7]. The Laplace library [6] further advances LLA's applicability, and evidences that LLA is competitive with popular alternatives [63, 32, 39].

Scalability issue The GGN matrix of size $P \times P$ is still unmanageable in modern DL scenarios, so further approximations sparsifying it are always introduced. The diagonal and KFAC [40] approximations are commonly adopted [50, 62], where only a diagonal or block-diagonal structure of the original GGN matrix is preserved. An orthogonal tactic is to concern only a subspace of the high-dimensional parameter space (e.g., the parameter space of the last layer [30]) to reduce the scale of the GGN matrix. However, these strategies sacrifice the fidelity of the learning outcomes, as the approximation errors in these cases can hardly be theoretically measured. To this end, we develop accElerated Linearized Laplace Approximation (ELLA) to push the limit of LLA in a more assured way.

# 3 Methodology

In this section, we first revisit the relation of LLA to Gaussian processes (GPs) and neural tangent kernels (NTKs) [24]. After that, we reveal how to accelerate LLA by kernel approximation. Based on these findings, we develop an efficient implementation of ELLA using the Nyström method [58].

# 3.1 The Gaussian Process View of LLA

Integrating $q(\pmb{\theta}) = \mathcal{N}(\pmb{\theta};\hat{\pmb{\theta}},\pmb{\Sigma})$ with the linear model $g_{\pmb{\theta}}^{\mathrm{lin}}(\pmb{x})$ actually gives rise to a function-space approximate posterior [14, 28, 22] in the form of $q(f) = \mathcal{GP}(f|g_{\hat{\pmb{\theta}}}(\pmb{x}),\kappa_{\mathrm{LLA}}(\pmb{x},\pmb{x}'))$ with

$$
\kappa_{\mathrm{LLA}}(\pmb{x}, \pmb{x}') \triangleq J_{\hat{\pmb{\theta}}}(\pmb{x})\, \pmb{\Sigma}\, J_{\hat{\pmb{\theta}}}(\pmb{x}')^{\top}. \tag{3}
$$

By the Woodbury matrix identity [60], we have:

$$
\pmb{\Sigma} = \left[\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} \Lambda_{\mathbf{X},\mathbf{Y}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} + \mathbf{I}_{P} / \sigma_{0}^{2}\right]^{-1} = \sigma_{0}^{2}\left(\mathbf{I}_{P} - \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top}\left[\Lambda_{\mathbf{X},\mathbf{Y}}^{-1} / \sigma_{0}^{2} + \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top}\right]^{-1} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\right). \tag{4}
$$

It follows that

$$
\kappa_{\mathrm{LLA}}(\pmb{x}, \pmb{x}') = \sigma_{0}^{2}\left(\kappa_{\mathrm{NTK}}(\pmb{x}, \pmb{x}') - \kappa_{\mathrm{NTK}}(\pmb{x}, \mathbf{X})\left[\Lambda_{\mathbf{X},\mathbf{Y}}^{-1} / \sigma_{0}^{2} + \kappa_{\mathrm{NTK}}(\mathbf{X}, \mathbf{X})\right]^{-1} \kappa_{\mathrm{NTK}}(\mathbf{X}, \pmb{x}')\right), \tag{5}
$$

where $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')\triangleq J_{\hat{\pmb{\theta}}}(\pmb{x})J_{\hat{\pmb{\theta}}}(\pmb{x}')^{\top}$ denotes the neural tangent kernel (NTK) [24] corresponding to $g_{\hat{\pmb{\theta}}}(\pmb{x})$. Note that $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')$ is a matrix-valued kernel, with values in the space of $C\times C$ matrices.

The main challenge then becomes the computation and inversion of the Gram matrix $\kappa_{\mathrm{NTK}}(\mathbf{X},\mathbf{X})$ of size $NC\times NC$. When either $N$ or $C$ is large, the estimation of $\kappa_{\mathrm{LLA}}$ still suffers from an inefficiency issue. To address this, existing work [22] assumes independence among the $C$ output dimensions to cast $q(f)$ into $C$ independent GPs following [51], and randomly subsamples $M\ll N$ data points to form a cheap substitute for the original Gram matrix. Despite being effective in some cases, these approximations are heuristic, lacking a clear theoretical foundation.

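The Woodbury step behind Equations (4)-(5) is easy to sanity-check numerically. The sketch below verifies that the two sides of Equation (4) agree on random stand-ins for $\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}$ and $\Lambda_{\mathbf{X},\mathbf{Y}}$ (all sizes are arbitrary toy choices):

```python
import numpy as np

rng = np.random.default_rng(0)
NC, P, sigma0_sq = 12, 30, 0.5          # toy sizes; P plays the parameter count
J = rng.standard_normal((NC, P))        # stands in for the stacked Jacobian
A = rng.standard_normal((NC, NC))
Lam = A @ A.T + np.eye(NC)              # SPD stand-in for Lambda_{X, Y}

# Left-hand side of Eq. (4): direct inversion in parameter space (P x P).
Sigma_direct = np.linalg.inv(J.T @ Lam @ J + np.eye(P) / sigma0_sq)

# Right-hand side of Eq. (4): Woodbury form, inverting only an NC x NC matrix.
inner = np.linalg.inv(np.linalg.inv(Lam) / sigma0_sq + J @ J.T)
Sigma_woodbury = sigma0_sq * (np.eye(P) - J.T @ inner @ J)

print(np.allclose(Sigma_direct, Sigma_woodbury))  # True up to round-off
```
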
# 3.2 Scale Up LLA by Kernel Approximation

We show that if we can approximate $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')$ with the inner product of some explicit $C\times K$-dimensional representations of the data, i.e., $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')\approx \varphi(\pmb{x})\varphi(\pmb{x}')^{\top}$ with $\varphi :\mathcal{X}\to \mathbb{R}^{C\times K}$, the scalability of the whole workflow can be unleashed.

Concretely, letting $\varphi_{\mathbf{X}} \in \mathbb{R}^{NC \times K}$ be the concatenation of $\{\varphi(\pmb{x}_i) \in \mathbb{R}^{C \times K}\}_{i=1}^N$, we have (detailed derivation in Appendix A.2; see also [9])

$$
\begin{aligned}
\kappa_{\mathrm{LLA}}(\pmb{x}, \pmb{x}') &\approx \sigma_{0}^{2}\Big(\varphi(\pmb{x})\varphi(\pmb{x}')^{\top} - \varphi(\pmb{x})\varphi_{\mathbf{X}}^{\top}\left[\Lambda_{\mathbf{X},\mathbf{Y}}^{-1} / \sigma_{0}^{2} + \varphi_{\mathbf{X}}\varphi_{\mathbf{X}}^{\top}\right]^{-1}\varphi_{\mathbf{X}}\varphi(\pmb{x}')^{\top}\Big) \\
&= \varphi(\pmb{x})\Big[\underbrace{\sum_{i}\varphi(\pmb{x}_{i})^{\top}\Lambda(\pmb{x}_{i}, \pmb{y}_{i})\varphi(\pmb{x}_{i}) + \mathbf{I}_{K} / \sigma_{0}^{2}}_{\mathbf{G}}\Big]^{-1}\varphi(\pmb{x}')^{\top} \triangleq \kappa_{\mathrm{ELLA}}(\pmb{x}, \pmb{x}').
\end{aligned} \tag{6}
$$

The matrix $\mathbf{G} \in \mathbb{R}^{K \times K}$ possesses a formula similar to that of $\pmb{\Sigma}$ in the original $\kappa_{\mathrm{LLA}}$ in Equation (3), yet is much smaller. Once we have $\varphi$, it is only required to perform one forward pass of $g$ and $\varphi$ for each training data point to estimate $\mathbf{G}$. When $K$ is reasonably small, e.g., $< 100$, it is cheap to invert $\mathbf{G}$.

# 3.3 Approximate NTKs via the Nyström Method

Typical kernel approximation means include the Nyström method [45, 58] and random features-based methods [48, 49, 61, 43, 15, 9]. Given that the latter routinely rely on a relatively large number of random features to gain a faithful approximation (which means a large $K$), we suggest leveraging the Nyström method, which captures only several principal components of the kernel, to build $\varphi$.

To comfortably apply the Nyström method, we first rewrite $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')$ as a scalar-valued kernel $\kappa_{\mathrm{NTK}}((\pmb{x},i),(\pmb{x}',i')) = J_{\hat{\pmb{\theta}}}(\pmb{x},i)J_{\hat{\pmb{\theta}}}(\pmb{x}',i')^{\top}$, where $J_{\hat{\pmb{\theta}}}(\pmb{x},i)\triangleq \nabla_{\pmb{\theta}}[g_{\pmb{\theta}}(\pmb{x})]^{(i)}|_{\pmb{\theta} = \hat{\pmb{\theta}}}:\mathcal{X}\times [C]\to \mathbb{R}^{1\times P}$<sup>2</sup> computes the gradient of the $i$-th output w.r.t. the parameters.

By Mercer's theorem [42],

$$
\kappa_{\mathrm{NTK}}((\pmb{x}, i), (\pmb{x}', i')) = \sum_{k \geq 1} \mu_{k}\, \psi_{k}(\pmb{x}, i)\, \psi_{k}(\pmb{x}', i'), \tag{7}
$$

where $\psi_{k}\in L^{2}(\mathcal{X}\times [C],q)$<sup>3</sup> refer to the eigenfunctions of the NTK w.r.t. some probability measure $q$, with $\mu_k\ge 0$ as the associated eigenvalues. Based on this, the Nyström method discovers the top-$K$ eigenvalues as well as the corresponding eigenfunctions for kernel approximation.

By definition, the eigenfunctions represent the spectral information of the kernel:

$$
\int \kappa_{\mathrm{NTK}}((\pmb{x}, i), (\pmb{x}', i'))\, \psi_{k}(\pmb{x}', i')\, q(\pmb{x}', i') = \mu_{k}\psi_{k}(\pmb{x}, i), \quad \forall k \geq 1, \tag{8}
$$

while being orthonormal under $q$:

$$
\int \psi_{k}(\pmb{x}, i)\, \psi_{k'}(\pmb{x}, i)\, q(\pmb{x}, i) = \mathbb{1}[k = k'], \quad \forall k, k' \geq 1. \tag{9}
$$


In our case, $q(\pmb{x}, i)$ can be factorized as the product of the data distribution and a uniform distribution over $\{1, \dots, C\}$, which can be trivially sampled from. The Nyström method draws $M$ ($M \geq K$) i.i.d. samples $\tilde{\mathbf{X}} = \{(\pmb{x}_1, i_1), \dots, (\pmb{x}_M, i_M)\}$ from $q$ to approximate the integration in Equation (8):

$$
\frac{1}{M}\sum_{m = 1}^{M}\kappa_{\mathrm{NTK}}((\pmb{x}, i), (\pmb{x}_{m}, i_{m}))\, \psi_{k}(\pmb{x}_{m}, i_{m}) \approx \mu_{k}\psi_{k}(\pmb{x}, i), \quad \forall k \in [K]. \tag{10}
$$

Applying this equation to these samples gives rise to:

$$
\frac{1}{M}\sum_{m = 1}^{M}\kappa_{\mathrm{NTK}}((\pmb{x}_{m'}, i_{m'}), (\pmb{x}_{m}, i_{m}))\, \psi_{k}(\pmb{x}_{m}, i_{m}) \approx \mu_{k}\psi_{k}(\pmb{x}_{m'}, i_{m'}), \quad \forall k \in [K],\ m' \in [M]; \tag{11}
$$

then we arrive at

$$
\frac{1}{M}\mathbf{K}\pmb{\psi}_{k} \approx \mu_{k}\pmb{\psi}_{k}, \quad k \in [K], \tag{12}
$$

where $\mathbf{K} = \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}$, with $\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}} \in \mathbb{R}^{M \times P}$ the concatenation of $\{J_{\hat{\pmb{\theta}}}(\pmb{x}_m, i_m) \in \mathbb{R}^{1 \times P}\}_{m=1}^M$ and $\pmb{\psi}_k = [\psi_k(\pmb{x}_1, i_1), \dots, \psi_k(\pmb{x}_M, i_M)]^{\top}$. This implies a scaled eigendecomposition problem of $\mathbf{K}$. We compute the top-$K$ eigenvalues $\lambda_1, \dots, \lambda_K$ of the matrix $\mathbf{K}$ and record the corresponding orthonormal eigenvectors $\pmb{u}_1, \dots, \pmb{u}_K$. Given the constraint in Equation (9), it is easy to see that:

$$
\mu_{k} \approx \frac{\lambda_{k}}{M}, \quad \text{and} \quad \psi_{k}(\pmb{x}_{m}, i_{m}) \approx \sqrt{M}\,\pmb{u}_{k}^{(m)}, \quad m \in [M]. \tag{13}
$$

Combining Equations (13) and (10) yields the Nyström approximation of the top-$K$ eigenfunctions:

$$
\hat{\psi}_{k}(\pmb{x}, i) = \frac{\sqrt{M}}{\lambda_{k}}\sum_{m = 1}^{M}\pmb{u}_{k}^{(m)}\kappa_{\mathrm{NTK}}((\pmb{x}, i), (\pmb{x}_{m}, i_{m})) = \frac{\sqrt{M}}{\lambda_{k}}J_{\hat{\pmb{\theta}}}(\pmb{x}, i)\,\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}\pmb{u}_{k}. \tag{14}
$$

Then, the mapping $\varphi$ which satisfies $\kappa_{\mathrm{NTK}}(\pmb{x},\pmb{x}')\approx \varphi(\pmb{x})\varphi(\pmb{x}')^{\top}$ can be realized as:

$$
[\varphi(\pmb{x})]^{(i, k)} = \hat{\psi}_{k}(\pmb{x}, i)\sqrt{\mu_{k}} = J_{\hat{\pmb{\theta}}}(\pmb{x}, i)\,\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}\pmb{u}_{k} / \sqrt{\lambda_{k}}, \tag{15}
$$

$$
\Rightarrow\ \varphi(\pmb{x}) = \left[J_{\hat{\pmb{\theta}}}(\pmb{x})\pmb{v}_{1}, \dots, J_{\hat{\pmb{\theta}}}(\pmb{x})\pmb{v}_{K}\right] \quad \text{with} \quad \pmb{v}_{k} = \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}\pmb{u}_{k} / \sqrt{\lambda_{k}}. \tag{16}
$$

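To make Equations (12)-(16) concrete, here is a minimal NumPy sketch of this Nyström construction, assuming a precomputed `J_tilde` whose rows are the sampled gradients $J_{\hat{\pmb{\theta}}}(\pmb{x}_m, i_m)$ (obtaining those rows is covered in Section 3.4):

```python
import numpy as np

def nystrom_features(J_tilde, K):
    """Given J_tilde of shape (M, P), return the v_k of Eq. (16) as a (P, K) matrix."""
    K_mat = J_tilde @ J_tilde.T                    # Gram matrix of Eq. (12), M x M
    lams, us = np.linalg.eigh(K_mat)               # eigenvalues in ascending order
    lams, us = lams[::-1][:K], us[:, ::-1][:, :K]  # keep the top-K eigenpairs
    return J_tilde.T @ us / np.sqrt(lams)          # columns: v_k = J^T u_k / sqrt(lam_k)

# Sanity check on random data: the rank-K reconstruction should approach J J^T.
rng = np.random.default_rng(0)
M, P, K = 64, 200, 16
J_tilde = rng.standard_normal((M, P))
V = nystrom_features(J_tilde, K)                   # P x K
approx = (J_tilde @ V) @ (J_tilde @ V).T           # best rank-K approximation of the Gram
gram = J_tilde @ J_tilde.T
print(np.linalg.norm(approx - gram) / np.linalg.norm(gram))  # small relative error
```

With `V` in hand, $\varphi(\pmb{x})$ of Equation (16) is just `jacobian_fn(x) @ V` for a routine `jacobian_fn` returning the $C \times P$ Jacobian at $\pmb{x}$ (a hypothetical helper; Section 3.4 avoids materializing it via JVPs).
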
Connection to sparse approximations of GPs A recent work [57] has shown that the sparse variational Gaussian process (SVGP) [53, 3] is algebraically equivalent to the Nyström approximation, because the evidence lower bound (ELBO) of the former encompasses the learning objective of the latter. That is to say, ELLA implicitly deploys a sparse approximation to the original GP predictive of LLA.

Discussion In this work, the developed Nyström approximation to NTKs is mainly used to accelerate the computation of the predictive covariance of LLA, while it can also be easily applied to future work on the practical application of NTKs.

# 3.4 Implementation

As shown, the estimation of $\varphi$ on a data point $\pmb{x}$ reduces to $K$ JVPs, which can be accomplished by invoking fwAD $K$ times:

$$
\begin{pmatrix} \pmb{x}, \hat{\pmb{\theta}} \\ \pmb{v}_{k} \end{pmatrix} \xrightarrow{\text{forward}} \begin{matrix} g_{\hat{\pmb{\theta}}}(\pmb{x}) \\ J_{\hat{\pmb{\theta}}}(\pmb{x})\pmb{v}_{k} \end{matrix}, \quad k \in [K], \tag{17}
$$

where the model output $g_{\hat{\pmb{\theta}}}(\pmb{x})$ and the JVP $J_{\hat{\pmb{\theta}}}(\pmb{x})\pmb{v}_k$ are simultaneously computed in one single forward pass. Prevalent DL libraries like PyTorch [47] and Jax [2] have already been armed with the capability for fwAD. Algorithm 2 details the procedure of building $\varphi$ in a PyTorch-like style.

With that, we can trivially compute the posterior $q(f)$ according to Equation (6), as detailed in Algorithm 1. The procedures for estimating $\mathbf{G}$ and the posterior can both be batched for acceleration.

Computation overhead The estimation of $\varphi$ involves $M$ forward and backward passes of $g_{\hat{\pmb{\theta}}}$ and the eigendecomposition of a matrix of size $M\times M$. After that, we only need to make $\{\pmb{v}_k\}_{k = 1}^K$ persistent, which amounts to storing $K$ more NN copies. Estimating $\mathbf{G}^{-1}$ requires scanning the training set once and inverting a matrix of size $K\times K$, which is cheap. The evaluation of $q(f)$ embodies that of $\varphi$, i.e., performing $K$ forward passes under the scope of fwAD. This is similar to other BNNs that perform $S$ forward passes with different MC parameter samples to estimate the posterior predictive.

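As a concrete instance of Equation (17), the sketch below computes the model output and one JVP in a single forward pass with PyTorch's forward-mode machinery (assuming PyTorch >= 2.0's `torch.func`; the two-layer `net`, tangent `v`, and input `x` are toy stand-ins):

```python
import torch
from torch.func import functional_call, jvp

net = torch.nn.Sequential(
    torch.nn.Linear(8, 16), torch.nn.Tanh(), torch.nn.Linear(16, 3)
)
params = dict(net.named_parameters())
v = {name: torch.randn_like(p) for name, p in params.items()}  # one tangent v_k
x = torch.randn(1, 8)

# A single forward pass yields both g(x) and the JVP J(x) v_k, as in Eq. (17).
g_x, jvp_out = jvp(lambda p: functional_call(net, p, (x,)), (params,), (v,))
print(g_x.shape, jvp_out.shape)  # both torch.Size([1, 3])
```

Repeating this for $\pmb{v}_1, \dots, \pmb{v}_K$ and stacking the JVPs column-wise gives the $C \times K$ matrix $\varphi(\pmb{x})$.
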
# 4 Theoretical Analysis

In this section, we theoretically analyze the approximation error of $\kappa_{\mathrm{ELLA}}(\pmb{x},\pmb{x}')$ w.r.t. $\kappa_{\mathrm{LLA}}(\pmb{x},\pmb{x}')$.

Algorithm 1: Build the LLA posterior.

```python
# g: NN pretrained by MAP; (X, Y): training set; C: number of classes
# M, K, sigma0_sq: hyperparameters

def estimate_G(phi, X, Y, K, sigma0_sq):
    G = zeros(K, K)
    for x, y in zip(X, Y):
        g_x, phi_x = phi(x)                      # one forward pass: output and features
        Lambda_xy = hessian(nll(g_x, y), g_x)    # C x C likelihood Hessian
        G += phi_x.T @ Lambda_xy @ phi_x
    return G + eye(K) / sigma0_sq

def _q_f(phi, G_inv, x):
    g_x, phi_x = phi(x)
    kappa_xx = phi_x @ G_inv @ phi_x.T           # predictive covariance of Eq. (6)
    return Normal(g_x, kappa_xx)

phi = build_phi(g_hat, X, C, M, K)
G_inv = inv(estimate_G(phi, X, Y, K, sigma0_sq))
q_f = partial(_q_f, phi, G_inv)
```

Algorithm 2: Build $\varphi$.

```python
def _phi(g_hat, C, vs, x):
    phi_x = zeros(C, len(vs))
    for k, v_k in enumerate(vs):
        with fwAD.enable():                      # forward-mode AD pass of Eq. (17)
            g_x, jvp = forward_with_tangent(g_hat, v_k, x)
        phi_x[:, k] = jvp
    return g_x, phi_x

def build_phi(g_hat, X, C, M, K):
    J = zeros(M, dim(theta_hat))                 # rows of J_{theta_hat, X_tilde}
    for m in range(M):
        x_m = uniform_sample(X)
        i_m = uniform_sample(range(C))
        J[m] = grad(g_hat(x_m)[i_m], theta_hat)
    lams, us = eig(J @ J.T, top=K)               # top-K eigenpairs, Eq. (12)
    vs = [J.T @ us[k] / sqrt(lams[k]) for k in range(K)]   # Eq. (16)
    return partial(_phi, g_hat, C, vs)
```

To reduce unnecessary complexity, this section assumes using $M = K$ i.i.d. MC samples when performing the Nyström method. Then, $\kappa_{\mathrm{ELLA}}$ can be reformulated as (details deferred to Appendix A.3)

$$
\kappa_{\mathrm{ELLA}}(\pmb{x}, \pmb{x}') = J_{\hat{\pmb{\theta}}}(\pmb{x})\underbrace{\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}\left[\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top}\Lambda_{\mathbf{X},\mathbf{Y}}\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top} + \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top} / \sigma_{0}^{2}\right]^{-1}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}}_{\pmb{\Sigma}'}J_{\hat{\pmb{\theta}}}(\pmb{x})^{\top}. \tag{18}
$$

Thereby, the gap between $\kappa_{\mathrm{ELLA}}(\pmb{x},\pmb{x}')$ and $\kappa_{\mathrm{LLA}}(\pmb{x},\pmb{x}')$ can be upper bounded by $\mathcal{E} = \| \pmb{\Sigma}' - \pmb{\Sigma}\|$, where $\| \cdot \|$ represents the matrix 2-norm (i.e., the spectral norm). In typical cases, $P \gg M$, thus with high probability $\mathbf{K} = \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top} \succ 0$. We present an upper bound of $\mathcal{E}$ as follows.

Theorem 1 (Proof in Appendix A.4). Let $c_{\Lambda}$ be a finite constant associated with $\Lambda$, and $\mathcal{E}'$ the error of the Nyström approximation, $\| \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top}(\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top})^{-1}\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} - \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top}\|$. It holds that

$$
\mathcal{E} \leq \sigma_{0}^{4} c_{\Lambda} \mathcal{E}' + \sigma_{0}^{2}.
$$

$\mathcal{E}'$ has been extensively analyzed by pioneering work [12, 5, 26], and we simply adapt the results developed by Drineas and Mahoney [12] to our case. We denote the maximum diagonal entry and the $(M + 1)$-th largest eigenvalue of $\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}\mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top}$ by $c_{\kappa}$ and $\tilde{\lambda}_{M + 1}$ respectively.

Theorem 2 (Error bound of the Nyström approximation). With probability at least $1 - \delta$, it holds that:

$$
\mathcal{E}' \leq \tilde{\lambda}_{M + 1} + \frac{NC}{\sqrt{M}} c_{\kappa}\left(2 + \log \frac{1}{\delta}\right).
$$

Plugging this back into $\mathcal{E}$, we arrive at the corollary below.

Corollary 1. With probability at least $1 - \delta$, the following bound holds:

$$
\mathcal{E} \leq \sigma_{0}^{4} c_{\Lambda}\left(\tilde{\lambda}_{M + 1} + \frac{NC}{\sqrt{M}} c_{\kappa}\left(2 + \log \frac{1}{\delta}\right)\right) + \sigma_{0}^{2}.
$$

As desired, the upper bound of $\mathcal{E}$ decreases as the number of MC samples $M$ grows.

# 5 Experiments

We first discuss how to specify the hyperparameters of ELLA, then expose an interesting finding. After that, we compare ELLA to competing baselines to evidence its merits in efficacy and scalability.

Figure 2 (panels a-c): (a)-(b) The approximation errors $\epsilon_{\mathrm{Nystrom}}$ and $\epsilon_{\mathrm{ELLA}}$ vary w.r.t. $M$ and $K$ ($M \geq K$). (c) The test NLL of ELLA on CIFAR-10 varies w.r.t. the number of training data $N$.

# 5.1 Specification of Hyperparameters

Given an NN $g_{\hat{\pmb{\theta}}}$ pretrained by MAP, we need to specify the prior variance $\sigma_0^2$, the number of MC samples in the Nyström method $M$, and the number of retained eigenpairs $K$ before applying ELLA. We simply set $\sigma_0^2$ to $\frac{1}{N\gamma}$, with $\gamma$ the weight decay coefficient used for pretraining, following [10].

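As a quick sanity check of this rule under an assumed CIFAR-10-like configuration ($N = 50000$ training points and weight decay $\gamma = 5 \times 10^{-4}$, both illustrative values):

$$
\sigma_0^2 = \frac{1}{N\gamma} = \frac{1}{50000 \times 5\times 10^{-4}} = \frac{1}{25} = 0.04.
$$
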
We perform an empirical study to inspect how $M$ and $K$ affect the quality of the Nyström approximation and the ELLA GP covariance. We take 2000 MNIST images as the training set $\mathbf{X}$, and 256 others as the validation set $\mathbf{X}_{\mathrm{val}}$. The architecture contains 2 convolutions and a linear head; batch normalization [23] and ReLU activations are used. The number of parameters $P$ is 29,034. Larger architectures or a larger $\mathbf{X}$ would render the exact evaluation of $\kappa_{\mathrm{LLA}}$ infeasible. We quantify the approximation error of the Nyström method by $\epsilon_{\mathrm{Nystrom}} \triangleq \| \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top} (\mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}} \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}}^{\top})^{-1} \mathbf{J}_{\hat{\pmb{\theta}},\tilde{\mathbf{X}}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} - \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} \| / \| \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}} \mathbf{J}_{\hat{\pmb{\theta}},\mathbf{X}}^{\top} \|$, and that from $\kappa_{\mathrm{ELLA}}$ to $\kappa_{\mathrm{LLA}}$ by $\epsilon_{\mathrm{ELLA}} \triangleq \frac{1}{|\mathbf{X}_{\mathrm{val}}|} \sum_{\pmb{x} \in \mathbf{X}_{\mathrm{val}}} \| \kappa_{\mathrm{ELLA}}(\pmb{x}, \pmb{x}) - \kappa_{\mathrm{LLA}}(\pmb{x}, \pmb{x}) \| / \| \kappa_{\mathrm{LLA}}(\pmb{x}, \pmb{x}) \|$. We vary $M$ from 4 to 2000 and $K$ from 4 to $M$, and report the approximation errors in Figure 2 (a)-(b). We notice that 1) the larger $K$, the better; 2) when fixing $K$, $\epsilon_{\mathrm{Nystrom}}$ and $\epsilon_{\mathrm{ELLA}}$ decay as $M$ grows; 3) $\epsilon_{\mathrm{Nystrom}}$ decays more rapidly than $\epsilon_{\mathrm{ELLA}}$, echoing Theorem 1. Given that ELLA needs to store $K$ vectors of size $P$, a small $K$ is desired for efficiency; $K \in [16, 32]$ seems to be a reasonable choice given Figure 2. Besides, Appendix C.1 includes a direct study on how the test NLL of ELLA varies w.r.t. $M$ and $K$ on the CIFAR-10 [31] benchmark. Given these results, we set $M = 2000$ and $K = 20$ in the following experiments unless otherwise stated.
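For reference, the $\epsilon_{\mathrm{Nystrom}}$ defined above takes only a few lines of NumPy, given the full and subsampled Jacobians (`J_X` of shape $(NC, P)$ and `J_sub` of shape $(M, P)$ are hypothetical names):

```python
import numpy as np

def nystrom_error(J_X, J_sub):
    """Relative spectral-norm error of the Nystrom reconstruction of J_X @ J_X.T."""
    gram = J_X @ J_X.T                                 # exact NC x NC Gram matrix
    cross = J_X @ J_sub.T                              # NC x M
    recon = cross @ np.linalg.solve(J_sub @ J_sub.T, cross.T)
    return np.linalg.norm(recon - gram, 2) / np.linalg.norm(gram, 2)
```
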
ELLA (or LLA) finds a GP posterior, so it predicts with a variant of the aforementioned posterior predictive, formulated as $p(\pmb{y}|\pmb{x}_*,\mathcal{D}) = \mathbb{E}_{p(f|\mathcal{D})}p(\pmb{y}|f(\pmb{x}_*)) \approx \mathbb{E}_{q(f)}p(\pmb{y}|f(\pmb{x}_*)) = \mathbb{E}_{\pmb{f}\sim \mathcal{N}(g_{\hat{\pmb{\theta}}}(\pmb{x}_*),\kappa_{\mathrm{ELLA}}(\pmb{x}_*,\pmb{x}_*))}p(\pmb{y}|\pmb{f})$. In classification tasks, we use 512 MC samples to approximate the last expectation, which is cheap.
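A minimal sketch of this last MC step for classification, assuming the predictive mean `g_x` (shape $(C,)$) and covariance `kappa_xx` (shape $(C, C)$) have already been computed as above:

```python
import torch

def predictive_probs(g_x, kappa_xx, S=512):
    """MC-approximate E_{f ~ N(g_x, kappa_xx)} softmax(f) over the logits.

    A small diagonal jitter may be needed if kappa_xx is only positive
    semi-definite rather than strictly positive definite.
    """
    dist = torch.distributions.MultivariateNormal(g_x, covariance_matrix=kappa_xx)
    logits = dist.sample((S,))                 # S x C samples of the function values
    return torch.softmax(logits, dim=-1).mean(dim=0)
```
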
# 5.2 The Overfitting Issue of LLA

Reinspecting Equations (1) and (6), we see that, with more training data involved, the covariance in LA, LLA, and ELLA shrinks and the uncertainty dissipates. However, under ubiquitous model misspecification [41], should the uncertainty vanish that fast? We perform a study with ResNets [18] on CIFAR-10 to seek an answer. Concretely, we randomly subsample $N$ data from the training set of CIFAR-10 and fit ELLA on them. We depict the test negative log-likelihood (NLL), accuracy, and expected calibration error (ECE) [17] of the deployed ELLA in Figure 2 (c) and Appendix C.2. We also provide the corresponding results of LLA*<sup>4</sup> in Appendix C.3. The V-shaped NLL curves across settings reflect the overfitting issue of ELLA and LLA* (or, more generally, LLA). Figure 7 also shows that tuning the prior precision w.r.t. the marginal likelihood can alleviate the overfitting of LLA* to some extent, which may be the reason why such an issue has not been reported by previous works. Note, however, that tuning the prior precision cannot fully eliminate overfitting.

To address the overfitting issue, we advocate early stopping when fitting ELLA/LLA on big data.

Table 1: Comparison on test accuracy $(\%)\uparrow$, NLL $\downarrow$, and ECE $\downarrow$ on CIFAR-10. We report the average results over 5 random runs. As the accuracy values of most methods are close, we do not highlight the best.

<table><tr><td rowspan="2">Method</td><td colspan="3">ResNet-20</td><td colspan="3">ResNet-32</td><td colspan="3">ResNet-44</td><td colspan="3">ResNet-56</td></tr><tr><td>Acc.</td><td>NLL</td><td>ECE</td><td>Acc.</td><td>NLL</td><td>ECE</td><td>Acc.</td><td>NLL</td><td>ECE</td><td>Acc.</td><td>NLL</td><td>ECE</td></tr><tr><td>ELLA</td><td>92.5</td><td>0.233</td><td>0.009</td><td>93.5</td><td>0.215</td><td>0.008</td><td>93.9</td><td>0.204</td><td>0.007</td><td>94.4</td><td>0.187</td><td>0.007</td></tr><tr><td>MAP</td><td>92.6</td><td>0.282</td><td>0.039</td><td>93.5</td><td>0.292</td><td>0.041</td><td>94.0</td><td>0.275</td><td>0.039</td><td>94.4</td><td>0.252</td><td>0.037</td></tr><tr><td>MFVI-BF</td><td>92.7</td><td>0.231</td><td>0.016</td><td>93.5</td><td>0.222</td><td>0.020</td><td>93.9</td><td>0.206</td><td>0.018</td><td>94.4</td><td>0.188</td><td>0.016</td></tr><tr><td>LLA*</td><td>92.6</td><td>0.269</td><td>0.034</td><td>93.5</td><td>0.259</td><td>0.033</td><td>94.0</td><td>0.237</td><td>0.028</td><td>94.4</td><td>0.213</td><td>0.022</td></tr><tr><td>LLA*-KFAC</td><td>92.6</td><td>0.271</td><td>0.035</td><td>93.5</td><td>0.260</td><td>0.033</td><td>94.0</td><td>0.232</td><td>0.028</td><td>94.4</td><td>0.202</td><td>0.024</td></tr><tr><td>LLA-Diag</td><td>92.2</td><td>0.728</td><td>0.404</td><td>92.7</td><td>0.755</td><td>0.430</td><td>92.8</td><td>0.778</td><td>0.445</td><td>92.9</td><td>0.843</td><td>0.480</td></tr><tr><td>LLA-KFAC</td><td>92.0</td><td>0.852</td><td>0.467</td><td>91.8</td><td>1.027</td><td>0.547</td><td>91.4</td><td>1.091</td><td>0.566</td><td>89.8</td><td>1.174</td><td>0.579</td></tr></table>


Figure 3: NLL (left) and ECE (right) on CIFAR-10 corruptions for models trained with the ResNet-56 architecture. Each box corresponds to a summary of the results across 19 types of skew.

Specifically, when iterating over the training data to estimate $\mathbf{G}$ (see Equation (6)), we continuously record the NLL of the current ELLA posterior on some validation data, and stop when there is a trend of overfitting. If we cannot access a validation set easily, we can apply strong data augmentation to some training data to form a substitute for the validation set. Compared to tuning the prior, early stopping also helps reduce the time cost of processing big data (e.g., ImageNet [8]).

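A minimal sketch of such early-stopped estimation of $\mathbf{G}$, with the likelihood-Hessian and validation-NLL routines passed in as callables (`lik_hessian` and `validation_nll` are hypothetical names):

```python
import numpy as np

def estimate_G_early_stopped(phi, lik_hessian, validation_nll, train_iter,
                             K, sigma0_sq, check_every=1000, patience=2):
    """Accumulate G over training data, stopping once validation NLL stops improving."""
    G = np.eye(K) / sigma0_sq
    best_nll, bad_checks = np.inf, 0
    for n, (x, y) in enumerate(train_iter, start=1):
        g_x, phi_x = phi(x)                      # C-dim output, C x K features
        G += phi_x.T @ lik_hessian(g_x, y) @ phi_x
        if n % check_every == 0:
            nll = validation_nll(phi, np.linalg.inv(G))
            if nll < best_nll:
                best_nll, bad_checks = nll, 0
            else:
                bad_checks += 1
                if bad_checks >= patience:       # trend of overfitting: stop early
                    break
    return np.linalg.inv(G)
```
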
# 5.3 Illustrative Regression

We build a regression problem with $N = 16$ samples from $y = \sin 2x + \epsilon$, $\epsilon \sim \mathcal{N}(0,0.2)$, as shown in Figure 1. The model is an MLP with 3 hidden layers and tanh activations, and we pretrain it by MAP for 1000 iterations. For ELLA, we set $M = 16$ and $K = 5$ for efficiency. Unless stated otherwise, we use the interfaces of Laplace [6] to implement LLA, LLA-KFAC, LLA-Diag, and LLA*. The hyperparameters of the competitors are equivalent to those of ELLA except for some dedicated ones like $M$ and $K$. It is clear that ELLA delivers a closer approximation to LLA than LLA-Diag and LLA*. We further quantify the quality of the predictive distributions produced by these approximate LLA methods. Considering that, in this case, the predictive distribution for one test datum is a Gaussian, we use the KL divergence between the Gaussians yielded by each approximate LLA method and vanilla LLA as a proxy of the approximation error (averaged over a set of test points). The results are reported in Appendix C.4. As shown, LLA-KFAC comes pretty close to LLA. Yet, LLA seems to underestimate in-between uncertainty in this setting, so ELLA may be a more reliable (rather than more accurate) approximation than LLA-KFAC. We also highlight the higher scalability of ELLA over LLA-KFAC (see Figure 4 (c)), which reflects that ELLA strikes a good trade-off between efficacy and efficiency.

# 5.4 CIFAR-10 Classification

Then, we evaluate ELLA on the CIFAR-10 benchmark using ResNet architectures [18]. We obtain pretrained MAP models from the open-source community. Apart from MAP, LLA*, LLA-Diag, and LLA-KFAC, we further introduce last-layer LLA with KFAC approximation (LLA*-KFAC) and mean-field VI via Bayesian finetuning [10] (MFVI-BF) as baselines. LLA cannot be directly applied as the GGN matrices are huge. These methods all belong to the family of Gaussian approximate posteriors and are all applied post hoc to the pretrained models, so the comparisons are fair.


Figure 4 (panels a-c): (a) Error versus confidence plots for methods trained on CIFAR-10 and tested on CIFAR-10+SVHN. (b) Test NLL of ELLA varying w.r.t. $K$ on CIFAR-10. (c) Comparison of the wall-clock time used for predicting all CIFAR-10 test data (measured on an NVIDIA A40 GPU). The results are obtained with the ResNet-20 architecture.

Table 2: Comparison on test accuracy $(\%)\uparrow$, NLL $\downarrow$, and ECE $\downarrow$ on ImageNet. We report the average results over 3 random runs.

<table><tr><td rowspan="2">Method</td><td colspan="3">ResNet-18</td><td colspan="3">ResNet-34</td><td colspan="3">ResNet-50</td></tr><tr><td>Acc.</td><td>NLL</td><td>ECE</td><td>Acc.</td><td>NLL</td><td>ECE</td><td>Acc.</td><td>NLL</td><td>ECE</td></tr><tr><td>ELLA</td><td>69.8</td><td>1.243</td><td>0.015</td><td>73.3</td><td>1.072</td><td>0.018</td><td>76.2</td><td>0.948</td><td>0.018</td></tr><tr><td>MAP</td><td>69.8</td><td>1.247</td><td>0.026</td><td>73.3</td><td>1.081</td><td>0.035</td><td>76.2</td><td>0.962</td><td>0.037</td></tr><tr><td>MFVI-BF</td><td>70.3</td><td>1.218</td><td>0.042</td><td>73.7</td><td>1.043</td><td>0.033</td><td>76.1</td><td>0.945</td><td>0.030</td></tr></table>

Regarding the setups, we use $M = 2000$ and $K = 20$ for ELLA; we use 20 MC samples to estimate the posterior predictive of MFVI-BF (as this incurs 20 NN forward passes), and 512 samples for the other methods, as stated above. We enable the tuning of the prior precision for all LLA baselines, but not for ELLA.

We present the comparison on test accuracy, NLL, and ECE in Table 1. As shown, ELLA exhibits superior NLL and ECE across settings. MFVI-BF also gives good NLL. LLA* and LLA*-KFAC can improve the uncertainty and calibration of MAP, yet they underperform ELLA. LLA-Diag and LLA-KFAC fail for unclear reasons (as also reported by [9]), so we exclude them from the following studies.

We then examine the models on the widely used out-of-distribution (OOD) generalization/robustness benchmark CIFAR-10 corruptions [19] and report the results in Figure 3 and Appendix C.5. ELLA clearly surpasses the baselines in aspects of NLL and ECE at various levels of skew, implying its ability to make conservative predictions for OOD inputs.

We further evaluate the models on the combination of the CIFAR-10 test data and the OOD SVHN test data. The predictions on SVHN are all regarded as wrong due to label shift. We plot the average error rate of the samples with $\leq \tau$ ($\tau \in [0,1]$) confidence in Figure 4 (a). As shown, ELLA makes fewer mistakes than the baselines under various confidence thresholds. Figure 4 (b) displays how $K$ impacts the test NLL. We see that $K \in [20,30]$ can already lead to good performance, reflecting the efficiency of ELLA. Another benefit of ELLA is that it lets us actively control the performance vs. cost trade-off. Figure 4 (c) shows the comparison of the time used for predicting all CIFAR-10 test data. We note that ELLA is slightly slower than MFVI-BF and substantially faster than non-last-layer LLA methods.

# 5.5 ImageNet Classification

We apply ELLA to ImageNet classification [8] to demonstrate its scalability. The experiment settings are identical to those for CIFAR-10. We observe that all LLA methods implemented with Laplace either cause out-of-memory (OOM) errors or suffer from very long fitting procedures, so we take only MAP and MFVI-BF as baselines. Table 2 presents the comparison on test accuracy, NLL, and ECE with ResNet architectures. We see that ELLA maintains its superiority in ECE, while MFVI-BF can induce higher accuracy and lower NLL. This may be attributed to the pretrained MAP model lying at a sharp maximum of the true posterior, making it necessary to properly adjust the mean of the Gaussian approximate posterior.


Figure 5: NLL (left) and ECE (right) on ImageNet corruptions for models trained with the ViT-B architecture. Each box corresponds to a summary of the results across 19 types of skew.

We lastly apply ELLA to ViT-B [11]. We compare ELLA to MAP in Table 3, as all other baselines incur OOM errors or prohibitively long running times. As shown, ELLA beats MAP in multiple aspects. Figure 5 shows the results of ELLA and MAP on ImageNet corruptions [19]; they are consistent with those for CIFAR-10 corruptions. This experiment reveals that ELLA can be more applicable and scalable than most existing BNNs.

Table 3: Comparison on test accuracy $(\%)\uparrow$, NLL $\downarrow$, and ECE $\downarrow$ on ImageNet with the ViT-B architecture.

<table><tr><td>Method</td><td>Acc.</td><td>NLL</td><td>ECE</td></tr><tr><td>ELLA</td><td>81.6</td><td>0.695</td><td>0.022</td></tr><tr><td>MAP</td><td>81.5</td><td>0.700</td><td>0.039</td></tr></table>
# 6 Related Work

LA [38, 50, 14, 30, 22, 6, 7] locally approximates the Bayesian posterior with a Gaussian distribution, analogous to VI with Gaussian variational distributions [16, 1, 36, 52, 46] and SWAG [39]. LA can be applied to pretrained models effortlessly, while the acquired posteriors are potentially restrictive (see Section 5.5). VI enjoys higher flexibility yet relies on costly training; BayesAdapter [10] seems to be a remedy for this issue, but its accessibility is still lower than LA's. SWAG stores a series of SGD iterates to heuristically construct an approximate posterior and is empirically weaker than LA/LLA [6].

Though more expressive approaches like deep ensembles [32] and MCMC [55, 4, 63] can explicitly explore diverse posterior modes, they face limitations in efficiency and scalability. What's more, it has been shown that LA/LLA can perform on par with or better than deep ensembles and cyclical MCMC on multiple benchmarks [6]. This may be attributed to the unidentified, complicated relationships between the parameter space and the function space of DNNs [59]. Of note, LA can also embrace deep ensembles to capture multiple posterior modes [13].

[9] introduces a general kernel approximation technique using neural networks. By contrast, we focus on leveraging kernel approximation to accelerate Laplace approximation, so the foci of the two works are distinct. Indeed, this work shares with Section 4.3 of [9] the idea that LLA can be accelerated by kernel approximation. But, beyond this shared idea, this work differs from [9] in aspects like motivations, techniques, implementations, theoretical backgrounds, and applications. Our implementation, theoretical analysis, and some empirical findings are all novel.

# 7 Conclusion

This paper proposes ELLA, a simple, effective, and reliable approach for Bayesian deep learning. ELLA addresses the unreliability issues of existing approximations to LLA and is implemented based on the Nyström method. We offer theoretical guarantees for ELLA and perform extensive studies to verify its efficacy and scalability. ELLA currently accounts for only the predictive, and extending it to estimate the model evidence for model selection [38] deserves future investigation. Using the model evidence to select the number of retained eigenpairs $K$ for ELLA is also viable.

# Acknowledgments and Disclosure of Funding

This work was supported by the National Key Research and Development Program of China (No. 2017YFA0700904), NSF of China Projects (Nos. 62061136001, 62076145, 62106121, U19B2034, U1811461, U19A2081, 6197222), the Beijing NSF Project (No. JQ19016), a grant from the Tsinghua Institute for Guo Qiang, and the High Performance Computing Center, Tsinghua University. J.Z. was also supported by the XPlorer Prize.

# References
|
| 354 |
+
|
| 355 |
+
[1] Charles Blundell, Julien Cornebise, Koray Kavukcuoglu, and Daan Wierstra. Weight uncertainty in neural network. In International Conference on Machine Learning, pages 1613-1622, 2015.

[2] James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018.

[3] David Burt, Carl Edward Rasmussen, and Mark Van Der Wilk. Rates of convergence for sparse variational gaussian process regression. In International Conference on Machine Learning, pages 862-871. PMLR, 2019.

[4] Tianqi Chen, Emily Fox, and Carlos Guestrin. Stochastic gradient hamiltonian monte carlo. In International Conference on Machine Learning, pages 1683-1691. PMLR, 2014.

[5] Corinna Cortes, Mehryar Mohri, and Ameet Talwalkar. On the impact of kernel approximation on learning accuracy. In International Conference on Artificial Intelligence and Statistics, pages 113-120. JMLR Workshop and Conference Proceedings, 2010.

[6] Erik Daxberger, Agustinus Kristiadi, Alexander Immer, Runa Eschenhagen, Matthias Bauer, and Philipp Hennig. Laplace redux - effortless bayesian deep learning. Advances in Neural Information Processing Systems, 34, 2021.

[7] Erik Daxberger, Eric Nalisnick, James U Allingham, Javier Antorán, and José Miguel Hernández-Lobato. Bayesian deep learning via subnetwork inference. In International Conference on Machine Learning, pages 2510-2521. PMLR, 2021.

[8] J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2009.

[9] Zhijie Deng, Jiaxin Shi, and Jun Zhu. Neuralef: Deconstructing kernels by deep neural networks. arXiv preprint arXiv:2205.00165, 2022.

[10] Zhijie Deng, Hao Zhang, Xiao Yang, Yinpeng Dong, and Jun Zhu. Bayesadapter: Being bayesian, inexpensively and reliably, via bayesian fine-tuning. arXiv preprint arXiv:2010.01979, 2020.

[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020.

[12] Petros Drineas, Michael W Mahoney, and Nello Cristianini. On the Nyström method for approximating a gram matrix for improved kernel-based learning. Journal of Machine Learning Research, 6(12), 2005.

[13] Runa Eschenhagen, Erik Daxberger, Philipp Hennig, and Agustinus Kristiadi. Mixtures of laplace approximations for improved post-hoc uncertainty in deep learning. arXiv preprint arXiv:2111.03577, 2021.

[14] Andrew YK Foong, Yingzhen Li, José Miguel Hernández-Lobato, and Richard E Turner. 'In-between' uncertainty in bayesian neural networks. arXiv preprint arXiv:1906.11537, 2019.

[15] Deena P Francis and Kumudha Raimond. Major advancements in kernel function approximation. Artificial Intelligence Review, 54(2):843-876, 2021.

[16] Alex Graves. Practical variational inference for neural networks. In Advances in Neural Information Processing Systems, pages 2348-2356, 2011.

[17] Chuan Guo, Geoff Pleiss, Yu Sun, and Kilian Q Weinberger. On calibration of modern neural networks. arXiv preprint arXiv:1706.04599, 2017.

[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016.

[19] Dan Hendrycks and Thomas Dietterich. Benchmarking neural network robustness to common corruptions and perturbations. arXiv preprint arXiv:1903.12261, 2019.

[20] José Miguel Hernández-Lobato and Ryan Adams. Probabilistic backpropagation for scalable learning of Bayesian neural networks. In International Conference on Machine Learning, pages 1861-1869, 2015.

[21] Geoffrey Hinton and Drew Van Camp. Keeping neural networks simple by minimizing the description length of the weights. In ACM Conference on Computational Learning Theory, 1993.

[22] Alexander Immer, Maciej Korzepa, and Matthias Bauer. Improving predictions of bayesian neural nets via local linearization. In International Conference on Artificial Intelligence and Statistics, pages 703-711. PMLR, 2021.

[23] Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International Conference on Machine Learning, pages 448-456. PMLR, 2015.

[24] Arthur Jacot, Franck Gabriel, and Clément Hongler. Neural tangent kernel: Convergence and generalization in neural networks. arXiv preprint arXiv:1806.07572, 2018.

[25] Huisu Jang and Jaewook Lee. An empirical study on modeling and prediction of bitcoin prices with bayesian neural networks based on blockchain information. IEEE Access, 6:5427-5437, 2017.

[26] Rong Jin, Tianbao Yang, Mehrdad Mahdavi, Yu-Feng Li, and Zhi-Hua Zhou. Improved bounds for the Nyström method with application to kernel classification. IEEE Transactions on Information Theory, 59(10):6939-6949, 2013.

[27] Alex Kendall and Yarin Gal. What uncertainties do we need in Bayesian deep learning for computer vision? In Advances in Neural Information Processing Systems, pages 5574-5584, 2017.

[28] Mohammad Emtiyaz Khan, Alexander Immer, Ehsan Abedi, and Maciej Korzepa. Approximate inference turns deep networks into gaussian processes. arXiv preprint arXiv:1906.01930, 2019.

[29] Mohammad Emtiyaz Khan, Didrik Nielsen, Voot Tangkaratt, Wu Lin, Yarin Gal, and Akash Srivastava. Fast and scalable Bayesian deep learning by weight-perturbation in adam. In International Conference on Machine Learning, pages 2616-2625, 2018.

[30] Agustinus Kristiadi, Matthias Hein, and Philipp Hennig. Being bayesian, even just a bit, fixes overconfidence in relu networks. arXiv preprint arXiv:2002.10118, 2020.

[31] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.

[32] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Advances in Neural Information Processing Systems, pages 6402-6413, 2017.

[33] Neil David Lawrence. Variational inference in probabilistic models. PhD thesis, CiteSeer, 2001.

[34] Christian Leibig, Vaneeda Allken, Murat Seçkin Ayhan, Philipp Berens, and Siegfried Wahl. Leveraging uncertainty information from deep neural networks for disease detection. Scientific Reports, 7(1):1-14, 2017.

[35] Qiang Liu and Dilin Wang. Stein variational gradient descent: A general purpose Bayesian inference algorithm. In Advances in Neural Information Processing Systems, pages 2378-2386, 2016.

[36] Christos Louizos and Max Welling. Structured and efficient variational deep learning with matrix gaussian posteriors. In International Conference on Machine Learning, pages 1708-1716, 2016.

[37] David JC MacKay. A practical Bayesian framework for backpropagation networks. Neural Computation, 4(3):448-472, 1992.

[38] David John Cameron MacKay. Bayesian methods for adaptive models. PhD thesis, California Institute of Technology, 1992.

[39] Wesley J Maddox, Pavel Izmailov, Timur Garipov, Dmitry P Vetrov, and Andrew Gordon Wilson. A simple baseline for bayesian uncertainty in deep learning. In Advances in Neural Information Processing Systems, pages 13153-13164, 2019.

[40] James Martens and Roger Grosse. Optimizing neural networks with kronecker-factored approximate curvature. In International Conference on Machine Learning, pages 2408-2417. PMLR, 2015.

[41] Andres Masegosa. Learning under model misspecification: Applications to variational and ensemble methods. Advances in Neural Information Processing Systems, 33:5479-5491, 2020.

[42] J Mercer. Functions of positive and negative type, and their connection with the theory of integral equations. Philosophical Transactions of the Royal Society of London, Series A, 209:415-446, 1909.

[43] Marina Munkhoeva, Yermek Kapushev, Evgeny Burnaev, and Ivan Oseledets. Quadrature-based features for kernel approximation. Advances in Neural Information Processing Systems, 31, 2018.

[44] Radford M Neal. Bayesian Learning for Neural Networks. PhD thesis, University of Toronto, 1995.

[45] Evert J Nyström. Über die praktische Auflösung von Integralgleichungen mit Anwendungen auf Randwertaufgaben. Acta Mathematica, 54:185-204, 1930.

[46] Kazuki Osawa, Siddharth Swaroop, Anirudh Jain, Runa Eschenhagen, Richard E Turner, Rio Yokota, and Mohammad Emtiyaz Khan. Practical deep learning with Bayesian principles. arXiv preprint arXiv:1906.02506, 2019.

[47] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, pages 8026-8037, 2019.

[48] Ali Rahimi and Benjamin Recht. Random features for large-scale kernel machines. Advances in Neural Information Processing Systems, 20, 2007.

[49] Ali Rahimi and Benjamin Recht. Weighted sums of random kitchen sinks: Replacing minimization with randomization in learning. Advances in Neural Information Processing Systems, 21, 2008.

[50] Hippolyt Ritter, Aleksandar Botev, and David Barber. A scalable laplace approximation for neural networks. In 6th International Conference on Learning Representations, volume 6. International Conference on Representation Learning, 2018.

[51] Matthias Seeger. Gaussian processes for machine learning. International Journal of Neural Systems, 14(02):69-106, 2004.

[52] Shengyang Sun, Changyou Chen, and Lawrence Carin. Learning structured weight uncertainty in Bayesian neural networks. In International Conference on Artificial Intelligence and Statistics, pages 1283-1292, 2017.

[53] Michalis Titsias. Variational learning of inducing variables in sparse gaussian processes. In International Conference on Artificial Intelligence and Statistics, pages 567-574. PMLR, 2009.

[54] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems, pages 5998-6008, 2017.

[55] Max Welling and Yee W Teh. Bayesian learning via stochastic gradient Langevin dynamics. In International Conference on Machine Learning, pages 681-688, 2011.

[56] Yeming Wen, Paul Vicol, Jimmy Ba, Dustin Tran, and Roger Grosse. Flipout: Efficient pseudo-independent weight perturbations on mini-batches. arXiv preprint arXiv:1803.04386, 2018.

[57] Veit Wild, Motonobu Kanagawa, and Dino Sejdinovic. Connections and equivalences between the Nyström method and sparse variational gaussian processes. arXiv preprint arXiv:2106.01121, 2021.

[58] Christopher Williams and Matthias Seeger. Using the Nyström method to speed up kernel machines. Advances in Neural Information Processing Systems, 13, 2000.

[59] Andrew Gordon Wilson and Pavel Izmailov. Bayesian deep learning and a probabilistic perspective of generalization. arXiv preprint arXiv:2002.08791, 2020.

[60] Max A Woodbury. Inverting modified matrices. Statistical Research Group, 1950.

[61] Felix Xinnan X Yu, Ananda Theertha Suresh, Krzysztof M Choromanski, Daniel N Holtmann-Rice, and Sanjiv Kumar. Orthogonal random features. Advances in Neural Information Processing Systems, 29, 2016.

[62] Guodong Zhang, Shengyang Sun, David Duvenaud, and Roger Grosse. Noisy natural gradient as variational inference. In International Conference on Machine Learning, pages 5847-5856, 2018.

[63] Ruqi Zhang, Chunyuan Li, Jianyi Zhang, Changyou Chen, and Andrew Gordon Wilson. Cyclic stochastic gradient mcmc for bayesian deep learning. arXiv preprint arXiv:1902.03932, 2019.

# Checklist

1. For all authors...

(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
(b) Did you describe the limitations of your work? [Yes] See Section Related Work.
(c) Did you discuss any potential negative societal impacts of your work? [N/A]
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]

2. If you are including theoretical results...

(a) Did you state the full set of assumptions of all theoretical results? [N/A]
(b) Did you include complete proofs of all theoretical results? [Yes] See Appendix.

3. If you ran experiments...

(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] See supplemental material.
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] See Section Experiments.
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [No]
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No]

4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...

(a) If your work uses existing assets, did you cite the creators? [Yes]
(b) Did you mention the license of the assets? [N/A]
(c) Did you include any new assets either in the supplemental material or as a URL? [No]
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]

5. If you used crowdsourcing or conducted research with human subjects...

(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]

acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96481025d7589ae2f3933e98289a6de3b788a3d445073dd26a4fb2a330a5dee6
size 402232

acceleratedlinearizedlaplaceapproximationforbayesiandeeplearning/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b895198297281ddf2790e8155fbbf417a3f8c2b5b7b6b31868fa2c19248e5ea
size 663064

acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b171bb58cda19fac9f084b167b6016743316685ba5badb4e7118ef2644de1f91
size 84628

acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d24061411c17614883fd8c3743bfcdbcabcbd50c278d80d8425bef36503550d9
size 107719

acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/773ec92a-9a41-4f29-ba68-dca69d66506e_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f19b31234972052bf3cb9a7f1e02998557bfb0c22f8d8cbad33cb6a79eecde1
size 392696

acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/full.md
ADDED
@@ -0,0 +1,513 @@

# Accelerated Primal-Dual Gradient Method for Smooth and Convex-Concave Saddle-Point Problems with Bilinear Coupling

Dmitry Kovalev
KAUST*
dakovalev1@gmail.com

Alexander Gasnikov
MIPT† ISP RAS‡ HSE§
gasnikov@yandex.ru

Peter Richtárik
KAUST
richtarik@gmail.com

# Abstract

In this paper we study the convex-concave saddle-point problem $\min_x\max_y f(x) + y^{\top}\mathbf{A}x - g(y)$, where $f(x)$ and $g(y)$ are smooth and convex functions. We propose an Accelerated Primal-Dual Gradient Method (APDG) for solving this problem, achieving (i) an optimal linear convergence rate in the strongly-convex-strongly-concave regime, matching the lower complexity bound (Zhang et al., 2021), and (ii) an accelerated linear convergence rate in the case when only one of the functions $f(x)$ and $g(y)$ is strongly convex, or even neither of them is. Finally, we obtain a linearly convergent algorithm for the general smooth and convex-concave saddle-point problem $\min_x\max_y F(x,y)$ without the requirement of strong convexity or strong concavity.

# 1 Introduction

In this paper we revisit the well-studied smooth convex-concave saddle-point problem with a bilinear coupling function, which takes the form

$$
\min _ {x \in \mathbb {R} ^ {d _ {x}}} \max _ {y \in \mathbb {R} ^ {d _ {y}}} F (x, y) = f (x) + y ^ {\top} \mathbf {A} x - g (y), \tag {1}
$$

where $f(x) \colon \mathbb{R}^{d_x} \to \mathbb{R}$ and $g(y) \colon \mathbb{R}^{d_y} \to \mathbb{R}$ are smooth and convex functions, and $\mathbf{A} \in \mathbb{R}^{d_y \times d_x}$ is a coupling matrix.

Problem (1) has a large number of applications, some of which we now briefly introduce.

# 1.1 Empirical risk minimization

A classical application is regularized empirical risk minimization (ERM) with linear predictors, a classical supervised learning problem. Given a data matrix $\mathbf{A} = [a_1,\dots ,a_n]^{\top}\in \mathbb{R}^{n\times d}$, where $a_{i}\in \mathbb{R}^{d}$ is the feature vector of the $i$-th data entry, our goal is to find a solution of

$$
\min _ {x} f (x) + \ell (\mathbf {A} x), \tag {2}
$$

where $f(x): \mathbb{R}^d \to \mathbb{R}$ is a convex regularizer, $\ell(y): \mathbb{R}^n \to \mathbb{R}$ is a convex loss function, and $x \in \mathbb{R}^d$ is a linear predictor. Alternatively, one can solve the following equivalent saddle-point reformulation of problem (2):

$$
\min _ {x} \max _ {y} f (x) + y ^ {\top} \mathbf {A} x - \ell^ {*} (y). \tag {3}
$$

The saddle-point reformulation is often preferable. For example, when such a formulation admits a finite-sum structure (Zhang and Lin, 2015; Wang and Xiao, 2017), it may reduce the communication complexity in the distributed setting (Xiao et al., 2019), and one may also better exploit the underlying sparsity structure (Lei et al., 2017). The reformulation itself follows from Fenchel duality, as sketched below.
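
As a quick sanity check of the equivalence, recall that for a closed convex loss $\ell$ the Fenchel-Moreau theorem gives $\ell = \ell^{**}$, so the coupling term in (3) is just the conjugate representation of $\ell(\mathbf{A}x)$:

$$
\ell (\mathbf {A} x) = \max _ {y \in \mathbb {R} ^ {n}} \; y ^ {\top} \mathbf {A} x - \ell^ {*} (y), \qquad \ell^ {*} (y) = \max _ {u \in \mathbb {R} ^ {n}} \; y ^ {\top} u - \ell (u).
$$

Substituting the first identity into (2) yields (3).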

# 1.2 Reinforcement learning

In reinforcement learning (RL) we are given a sequence $\{(s_t, a_t, r_t, s_{t+1})\}_{t=1}^n$ generated by a policy $\pi$, where $s_t$ is the state at time step $t$, $a_t$ is the action taken at time step $t$ by policy $\pi$, and $r_t$ is the reward after taking action $a_t$. A key step in many RL algorithms is to estimate the value function of a given policy $\pi$, which is defined as

$$
V ^ {\pi} (s) = \mathbb {E} \left[ \sum_ {t = 0} ^ {\infty} \gamma^ {t} r _ {t} \mid s _ {0} = s, \pi \right], \tag {4}
$$

where $\gamma \in (0,1)$ is a discount factor. A common approach to this problem is to use a linear approximation $V^{\pi}(s) = \phi(s)^{\top}x$, where $\phi(s)$ is a feature vector of a state $s$. The model parameter $x$ is often estimated by minimizing the mean squared projected Bellman error

$$
\min _ {x} \| \mathbf {A} x - b \| _ {\mathbf {C} ^ {- 1}} ^ {2}, \tag {5}
$$

where $\mathbf{C} = \sum_{t=1}^{n} \phi(s_t) \phi(s_t)^\top$, $b = \sum_{t=1}^{n} r_t \phi(s_t)$ and $\mathbf{A} = \mathbf{C} - \gamma \sum_{t=1}^{n} \phi(s_t) \phi(s_{t+1})^\top$. One can observe that it is hard to apply gradient-based methods to problem (5) because this would require one to compute an inverse of the matrix $\mathbf{C}$. In order to tackle this issue, one can instead solve an equivalent saddle-point reformulation proposed by Du et al. (2017). This reformulation is given by

$$
\min _ {x} \max _ {y} - 2 y ^ {\top} \mathbf {A} x - \| y \| _ {\mathbf {C}} ^ {2} + 2 b ^ {\top} y, \tag {6}
$$

and is an instance of problem (1). Solving this reformulation with gradient methods does not require matrix inversion; the equivalence can be checked directly, as shown below.
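
To verify the equivalence of (5) and (6), maximize (6) over $y$ in closed form. Setting the $y$-gradient to zero gives $y^{\star} = \mathbf{C}^{-1}(b - \mathbf{A}x)$, and substituting back,

$$
\max _ {y} \; \left(- 2 y ^ {\top} \mathbf {A} x - \| y \| _ {\mathbf {C}} ^ {2} + 2 b ^ {\top} y\right) = (b - \mathbf {A} x) ^ {\top} \mathbf {C} ^ {- 1} (b - \mathbf {A} x) = \| \mathbf {A} x - b \| _ {\mathbf {C} ^ {- 1}} ^ {2},
$$

so minimizing over $x$ recovers (5), while the reformulation itself only ever multiplies by $\mathbf{C}$, never by $\mathbf{C}^{-1}$.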

# 1.3 Minimization under affine constraints

Next, consider the problem of convex minimization under affine constraints,

$$
\min _ {\mathbf {A} x = b} f (x), \tag {7}
$$

where $b \in \operatorname{range} \mathbf{A}$. This problem covers a wide range of applications, including inverse problems in imaging (Chambolle and Pock, 2016), sketched learning-type applications (Keriven et al., 2018), network flow optimization (Zargham et al., 2013) and optimal transport (Peyré et al., 2019).

Another important application of problem (7) is decentralized distributed optimization (Kovalev et al., 2020; Scaman et al., 2017; Li et al., 2020; Nedic et al., 2017; Arjevani et al., 2020; Ye et al., 2020). In this setting, the distributed minimization problem is often reformulated as

$$
\min _ {\sqrt {\mathbf {W}} \left(x _ {1}, \dots , x _ {n}\right) ^ {\top} = 0} \left[ f \left(x _ {1}, \dots , x _ {n}\right) = \sum_ {i = 1} ^ {n} f _ {i} \left(x _ {i}\right) \right], \tag {8}
$$

where $f_{i}(x_{i})$ is a function stored locally by a computing node $i \in \{1, \dots, n\}$ and $\mathbf{W} \in \mathbb{R}^{n \times n}$ is the Laplacian matrix of a graph representing the communication network. The constraint enforces consensus among the nodes: $x_{1} = \ldots = x_{n}$.

One can observe that problem (7) is equivalent to the saddle-point formulation

$$
\min _ {x} \max _ {y} f (x) + y ^ {\top} \mathbf {A} x - y ^ {\top} b, \tag {9}
$$

which is another instance of problem (1). State-of-the-art methods often focus on this formulation instead of directly solving (7). In particular, Salim et al. (2021) and Kovalev et al. (2020) obtained optimal algorithms for solving (7) and (8) using this saddle-point approach. The equivalence of (7) and (9) is immediate, as the following identity shows.
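
With $g(y) = y^{\top}b$, the inner maximum in (9) acts as the indicator of the constraint set:

$$
\max _ {y \in \mathbb {R} ^ {d _ {y}}} \; y ^ {\top} (\mathbf {A} x - b) = \begin{cases} 0, & \mathbf {A} x = b, \\ + \infty , & \text {otherwise}, \end{cases}
$$

so any $x$ violating the constraint is ruled out of the outer minimization, and (9) reduces to (7).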

Table 1: Comparison of our method (APDG, Algorithm 1) with existing state-of-the-art algorithms for solving problem (1) in the 5 different cases described in section 5.

Strongly-convex-strongly-concave case (section 5.1)

| Method | Iteration complexity |
|---|---|
| Algorithm 1 | $\mathcal{O}\left(\max\left\{\sqrt{\frac{L_x}{\mu_x}},\sqrt{\frac{L_y}{\mu_y}},\frac{L_{xy}}{\sqrt{\mu_x\mu_y}}\right\}\log\frac{1}{\epsilon}\right)$ |
| Lower bound, Zhang et al. (2021b) | $\mathcal{O}\left(\max\left\{\sqrt{\frac{L_x}{\mu_x}},\sqrt{\frac{L_y}{\mu_y}},\frac{L_{xy}}{\sqrt{\mu_x\mu_y}}\right\}\log\frac{1}{\epsilon}\right)$ |
| DIPPA, Xie et al. (2021) | $\tilde{\mathcal{O}}\left(\max\left\{\sqrt[4]{\frac{L_x^2L_y}{\mu_x^2\mu_y}},\sqrt[4]{\frac{L_xL_y^2}{\mu_x\mu_y^2}},\frac{L_{xy}}{\sqrt{\mu_x\mu_y}}\right\}\log\frac{1}{\epsilon}\right)$ |
| Proximal Best Response, Wang and Li (2020) | $\tilde{\mathcal{O}}\left(\max\left\{\sqrt{\frac{L_x}{\mu_x}},\sqrt{\frac{L_y}{\mu_y}},\sqrt{\frac{L_xL_y}{\mu_x\mu_y}}\right\}\log\frac{1}{\epsilon}\right)$ |

Affinely constrained minimization case (section 5.2)

| Method | Iteration complexity |
|---|---|
| Algorithm 1 | $\mathcal{O}\left(\frac{L_{xy}}{\mu_{xy}}\sqrt{\frac{L_x}{\mu_x}}\log\frac{1}{\epsilon}\right)$ |
| Lower bound, Salim et al. (2021) | $\mathcal{O}\left(\frac{L_{xy}}{\mu_{xy}}\sqrt{\frac{L_x}{\mu_x}}\log\frac{1}{\epsilon}\right)$ |
| OPAPC, Kovalev et al. (2020) | $\mathcal{O}\left(\frac{L_{xy}}{\mu_{xy}}\sqrt{\frac{L_x}{\mu_x}}\log\frac{1}{\epsilon}\right)$ |

Strongly-convex-concave case (section 5.3)

| Method | Iteration complexity |
|---|---|
| Algorithm 1 | $\mathcal{O}\left(\max\left\{\frac{\sqrt{L_xL_y}}{\mu_{xy}},\frac{L_{xy}}{\mu_{xy}}\sqrt{\frac{L_x}{\mu_x}},\frac{L_{xy}^2}{\mu_{xy}^2}\right\}\log\frac{1}{\epsilon}\right)$ |
| Lower bound | N/A |
| Alt-GDA, Zhang et al. (2021a) | $\mathcal{O}\left(\max\left\{\frac{L^2}{\mu_{xy}^2},\frac{L}{\mu_x}\right\}\log\frac{1}{\epsilon}\right)$ |

Bilinear case (section 5.4)

| Method | Iteration complexity |
|---|---|
| Algorithm 1 | $\mathcal{O}\left(\frac{L_{xy}^2}{\mu_{xy}^2}\log\frac{1}{\epsilon}\right)$ |
| Lower bound, Ibrahim et al. (2020) | $\mathcal{O}\left(\frac{L_{xy}}{\mu_{xy}}\log\frac{1}{\epsilon}\right)$ |
| Azizian et al. (2020) | $\mathcal{O}\left(\frac{L_{xy}}{\mu_{xy}}\log\frac{1}{\epsilon}\right)$ |

Convex-concave case (section 5.5)

| Method | Iteration complexity |
|---|---|
| Algorithm 1 | $\mathcal{O}\left(\max\left\{\frac{\sqrt{L_xL_y}L_{xy}}{\mu_{xy}^2},\frac{L_{xy}^2}{\mu_{xy}^2}\right\}\log\frac{1}{\epsilon}\right)$ |
| Lower bound | N/A |

# 1.4 Bilinear min-max problems

Unconstrained bilinear saddle-point problems of the form

$$
\min _ {x \in \mathbb {R} ^ {d _ {x}}} \max _ {y \in \mathbb {R} ^ {d _ {y}}} a ^ {\top} x + y ^ {\top} \mathbf {A} x - b ^ {\top} y \tag {10}
$$

are another special case of problem (1), one where both $f(x)$ and $g(y)$ are linear functions. While such problems do not usually play an important role in practice, they are often a good testing ground for theoretical purposes (Gidel et al., 2019; Azizian et al., 2020; Zhang et al., 2021a; Mokhtari et al., 2020; Daskalakis et al., 2018; Liang and Stokes, 2019).

# 2 Literature Review and Contributions

In this work we are interested in algorithms able to solve problem (1) with a linear iteration complexity. That is, we are interested in methods that can provably find an $\epsilon$-accurate solution of problem (1) in a number of iterations proportional to $\log \frac{1}{\epsilon}$ (see Definitions 2 and 3). This is typically achieved when the functions $f(x)$ and $g(y)$ are assumed to be strongly convex (see Definition 1). An example of this is the celebrated extragradient method of Korpelevich (1976).

Recent work has shown that linear iteration complexity can also be achieved in the less restrictive case when only one of the functions $f(x)$ and $g(y)$ is strongly convex. This was first shown by Du and Hu (2019), and later improved on by Zhang et al. (2021a).

However, and this is the starting point of our research, to the best of our knowledge, there are no algorithms with linear iteration complexity in the case when neither $f(x)$ nor $g(y)$ is strongly convex.

# 2.1 Acceleration

Loosely speaking, we say that an algorithm is non-accelerated if its iteration complexity is proportional to at least the first power of the condition numbers associated with the problem, such as $L_{x} / \mu_{x}$ and $L_{y} / \mu_{y}$, where $L_{x}$ and $L_{y}$ are smoothness constants, and $\mu_{x}$ and $\mu_{y}$ are strong convexity constants (see Assumption 1 and Assumption 2). In contrast, the iteration complexity of an accelerated algorithm is proportional to the square root of such condition numbers, e.g., $\sqrt{L_x / \mu_x}$ and $\sqrt{L_y / \mu_y}$.

There were several recent attempts to design accelerated algorithms for solving problem (1) (Xie et al., 2021; Wang and Li, 2020; Alkousa et al., 2020). These attempts rely on stacking multiple algorithms on top of each other, and result in complicated methods. For example, Lin et al. (2020) use a non-accelerated algorithm as a sub-routine for the inexact accelerated proximal-point method. This approach allows them to obtain accelerated algorithms for solving problem (1) in a straightforward and tractable way. However, it has significant drawbacks: the algorithms obtained this way have (i) additional logarithmic factors in their iteration complexity, and (ii) a complex nested structure with the requirement to manually set inner loop sizes, which is a byproduct of the design process based on combining multiple algorithms. These drawbacks limit the performance of the resulting algorithms in theory, and require additional fine-tuning in practice.

A philosophically different approach to designing such algorithms, and the one that we adopt in this work, is to attempt a direct acceleration of a suitable algorithm for solving problem (1), similarly to what Nesterov (1983) did for convex minimization problems. While this is technically more demanding, algorithms obtained this way typically don't have the aforementioned drawbacks. Hence, we follow the latter approach in this work.

# 2.2 Main contributions

In this work we propose an Accelerated Primal-Dual Gradient Method (APDG; Algorithm 1) for solving problem (1) and provide a theoretical analysis of its convergence properties (Theorem 1). In particular, we prove the following results.

(i) When both functions $f(x)$ and $g(y)$ are strongly convex, Algorithm 1 achieves the optimal linear convergence rate, matching the lower bound obtained by Zhang et al. (2021b). To the best of our knowledge, Algorithm 1 is the first optimal algorithm in this regime.

(ii) We establish linear convergence of Algorithm 1 in the case when only one of the functions $f(x)$ or $g(y)$ is strongly convex, and $\mathbf{A}$ is a full row or full column rank matrix, respectively. This improves upon the results provided by Du and Hu (2019); Zhang et al. (2021a).

(iii) We establish linear convergence of Algorithm 1 in the case when neither of the functions $f(x)$ and $g(y)$ is strongly convex, and the matrix $\mathbf{A}$ is square and full rank. To the best of our knowledge, Algorithm 1 is the first algorithm achieving linear convergence in this setting.

Table 1 provides a brief comparison of the complexity of Algorithm 1 (Theorem 1) with the current state of the art. Please refer to section 5 for a detailed discussion of this result and a comparison with related work.

# 2.3 General min-max problem and additional contributions

In our work we also consider the saddle-point problem

$$
\min _ {x \in \mathbb {R} ^ {d _ {x}}} \max _ {y \in \mathbb {R} ^ {d _ {y}}} F (x, y), \tag {11}
$$

where $F(x,y) \colon \mathbb{R}^{d_x} \times \mathbb{R}^{d_y} \to \mathbb{R}$ is a smooth function, which is convex in $x$ and concave in $y$. One can observe that the main problem (1) is a special case of this more general problem (11).

As an additional contribution, we propose a Gradient Descent-Ascent Method with Extrapolation (GDAE) for solving the general convex-concave saddle-point problem (11), and provide a theoretical analysis of its convergence properties.

(i) When the function $F(x,y)$ is strongly convex in $x$ and strongly concave in $y$, GDAE achieves a linear convergence rate, which recovers the convergence result of Cohen et al. (2020).

(ii) Under certain assumptions on the way the variables $x$ and $y$ are coupled by the function $F(x, y)$, we establish linear convergence of GDAE in the case when the function $F(x, y)$ is strongly-convex-concave, convex-strongly-concave, or even just convex-concave. To the best of our knowledge, GDAE is the first algorithm achieving linear convergence under such assumptions.

Please refer to the Appendix for a detailed description of these results and related work.

# 3 Basic Definitions and Assumptions

We start by formalizing the notions of smoothness and strong convexity of a function.

Definition 1. A function $h(z) \colon \mathbb{R}^d \to \mathbb{R}$ is $L$-smooth and $\mu$-strongly convex for $L \geq \mu \geq 0$ if for all $z_1, z_2 \in \mathbb{R}^d$ the following inequality holds:

$$
\frac {\mu}{2} \| z _ {1} - z _ {2} \| ^ {2} \leq \mathrm {D} _ {h} \left(z _ {1}, z _ {2}\right) \leq \frac {L}{2} \| z _ {1} - z _ {2} \| ^ {2}. \tag {12}
$$

Above, $\mathrm{D}_h(z_1,z_2) = h(z_1) - h(z_2) - \langle \nabla h(z_2),z_1 - z_2\rangle$ is the Bregman divergence associated with the function $h(z)$.
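
For a concrete check of Definition 1, take $h(z) = \frac{1}{2}\|z\|^2$. Then

$$
\mathrm {D} _ {h} (z _ {1}, z _ {2}) = \tfrac {1}{2} \| z _ {1} \| ^ {2} - \tfrac {1}{2} \| z _ {2} \| ^ {2} - \langle z _ {2}, z _ {1} - z _ {2} \rangle = \tfrac {1}{2} \| z _ {1} - z _ {2} \| ^ {2},
$$

so (12) holds with equality on both sides and $L = \mu = 1$.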

We are now ready to state the main assumptions that we impose on problem (1). We start with Assumptions 1 and 2, which formalize the strong convexity and smoothness properties of the functions $f(x)$ and $g(y)$.

Assumption 1. Function $f(x)$ is $L_{x}$-smooth and $\mu_{x}$-strongly convex for $L_{x} \geq \mu_{x} \geq 0$.

Assumption 2. Function $g(y)$ is $L_{y}$-smooth and $\mu_{y}$-strongly convex for $L_{y} \geq \mu_{y} \geq 0$.

Note that $\mu_x$ and $\mu_y$ are allowed to be zero. That is, both $f(x)$ and $g(y)$ are allowed to be non-strongly convex.

The following assumption formalizes the spectral properties of the matrix $\mathbf{A}$.

Assumption 3. There exist constants $L_{xy} > \mu_{xy}, \mu_{yx} \geq 0$ such that

$$
\mu_ {x y} ^ {2} \leq \begin{cases} \lambda_ {\min} ^ {+} (\mathbf {A} \mathbf {A} ^ {\top}) & \text {if } \nabla g (y) \in \operatorname {range} \mathbf {A} \text { for all } y \in \mathbb {R} ^ {d _ {y}}, \\ \lambda_ {\min} (\mathbf {A} \mathbf {A} ^ {\top}) & \text {otherwise}, \end{cases}
$$

$$
\mu_ {y x} ^ {2} \leq \begin{cases} \lambda_ {\min} ^ {+} (\mathbf {A} ^ {\top} \mathbf {A}) & \text {if } \nabla f (x) \in \operatorname {range} \mathbf {A} ^ {\top} \text { for all } x \in \mathbb {R} ^ {d _ {x}}, \\ \lambda_ {\min} (\mathbf {A} ^ {\top} \mathbf {A}) & \text {otherwise}, \end{cases}
$$

$$
L _ {x y} ^ {2} \geq \lambda_ {\max} (\mathbf {A} ^ {\top} \mathbf {A}) = \lambda_ {\max} (\mathbf {A} \mathbf {A} ^ {\top}),
$$

where $\lambda_{\mathrm{min}}(\cdot)$, $\lambda_{\mathrm{min}}^{+}(\cdot)$ and $\lambda_{\mathrm{max}}(\cdot)$ denote the smallest, smallest positive and largest eigenvalue of a matrix, respectively, and $\operatorname{range}$ denotes the range space of a matrix.
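
For a concrete matrix these constants are cheap to read off from the spectra of $\mathbf{A}\mathbf{A}^{\top}$ and $\mathbf{A}^{\top}\mathbf{A}$. A minimal NumPy sketch, assuming the range conditions above hold (so the smallest positive eigenvalue is the relevant one); the matrix below is arbitrary example data:

```python
# Sketch: tightest constants allowed by Assumption 3 for a given coupling matrix A.
import numpy as np

A = np.random.default_rng(1).standard_normal((4, 6))  # hypothetical coupling matrix

def spectral_constants(A, tol=1e-10):
    e_yy = np.linalg.eigvalsh(A @ A.T)       # eigenvalues of A A^T
    e_xx = np.linalg.eigvalsh(A.T @ A)       # eigenvalues of A^T A
    L_xy = np.sqrt(e_yy.max())               # L_xy^2 >= lambda_max(A A^T)
    mu_xy = np.sqrt(e_yy[e_yy > tol].min())  # mu_xy^2 <= lambda_min^+(A A^T)
    mu_yx = np.sqrt(e_xx[e_xx > tol].min())  # mu_yx^2 <= lambda_min^+(A^T A)
    return L_xy, mu_xy, mu_yx

print(spectral_constants(A))
```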

By $\mathcal{S} \subset \mathbb{R}^{d_x} \times \mathbb{R}^{d_y}$ we denote the solution set of problem (1). Note that $(x^*, y^*) \in \mathcal{S}$ if and only if $(x^*, y^*)$ satisfies the first-order optimality conditions

$$
\left\{ \begin{array}{l} \nabla_ {x} F \left(x ^ {*}, y ^ {*}\right) = \nabla f \left(x ^ {*}\right) + \mathbf {A} ^ {\top} y ^ {*} = 0, \\ \nabla_ {y} F \left(x ^ {*}, y ^ {*}\right) = - \nabla g \left(y ^ {*}\right) + \mathbf {A} x ^ {*} = 0. \end{array} \right. \tag {13}
$$

Our main goal is to propose an algorithm for finding a solution to problem (1). Numerical iterative algorithms typically find an approximate solution of a given problem. We formalize this through the following definition.

Definition 2. Let the solution set $\mathcal{S}$ be nonempty. We call a pair of vectors $(x, y) \in \mathbb{R}^{d_x} \times \mathbb{R}^{d_y}$ an $\epsilon$-accurate solution of problem (1) for a given accuracy $\epsilon > 0$ if it satisfies

$$
\min _ {\left(x ^ {*}, y ^ {*}\right) \in \mathcal {S}} \max \left\{\| x - x ^ {*} \| ^ {2}, \| y - y ^ {*} \| ^ {2} \right\} \leq \epsilon . \tag {14}
$$

We also want to propose an efficient algorithm for solving problem (1). That is, we want to propose an algorithm with the lowest possible iteration complexity, which we define next.

Definition 3. The iteration complexity of an algorithm for solving problem (1) is the number of iterations the algorithm requires to find an $\epsilon$-accurate solution of this problem. At each iteration the algorithm is allowed to perform $\mathcal{O}(1)$ computations of the gradients $\nabla f(x)$ and $\nabla g(y)$ and matrix-vector multiplications with the matrices $\mathbf{A}$ and $\mathbf{A}^{\top}$.

# 4 Accelerated Primal-Dual Gradient Method

Algorithm 1 APDG: Accelerated Primal-Dual Gradient Method

1: Input: $x^0 \in \operatorname{range} \mathbf{A}^\top$, $y^0 \in \operatorname{range} \mathbf{A}$, $\eta_x, \eta_y, \alpha_x, \alpha_y, \beta_x, \beta_y > 0$, $\tau_x, \tau_y, \sigma_x, \sigma_y \in (0,1]$, $\theta \in (0,1)$
2: $x_f^0 = x^0$
3: $y_f^0 = y^{-1} = y^0$
4: for $k = 0,1,2,\ldots$ do
5: $y_m^k = y^k + \theta(y^k - y^{k-1})$
6: $x_g^k = \tau_x x^k + (1 - \tau_x) x_f^k$
7: $y_g^k = \tau_y y^k + (1 - \tau_y) y_f^k$
8: $x^{k+1} = x^k + \eta_x \alpha_x(x_g^k - x^k) - \eta_x \beta_x \mathbf{A}^\top (\mathbf{A} x^k - \nabla g(y_g^k)) - \eta_x (\nabla f(x_g^k) + \mathbf{A}^\top y_m^k)$
9: $y^{k+1} = y^k + \eta_y \alpha_y(y_g^k - y^k) - \eta_y \beta_y \mathbf{A} (\mathbf{A}^\top y^k + \nabla f(x_g^k)) - \eta_y (\nabla g(y_g^k) - \mathbf{A} x^{k+1})$
10: $x_f^{k+1} = x_g^k + \sigma_x (x^{k+1} - x^k)$
11: $y_f^{k+1} = y_g^k + \sigma_y (y^{k+1} - y^k)$
12: end for
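
To make the update rule concrete, here is a minimal NumPy sketch of the loop above on a toy strongly-convex-strongly-concave quadratic instance. The parameter values are illustrative placeholders only; the parameters for which Theorem 1 holds are derived in the analysis.

```python
# Toy APDG run on f(x) = 0.5 x^T Hf x, g(y) = 0.5 y^T Hg y with coupling A.
import numpy as np

rng = np.random.default_rng(0)
dx, dy = 20, 15
A = rng.standard_normal((dy, dx))                               # coupling matrix
Bx = rng.standard_normal((dx, dx)); Hf = Bx @ Bx.T + np.eye(dx)  # strongly convex f
By = rng.standard_normal((dy, dy)); Hg = By @ By.T + np.eye(dy)  # strongly convex g
grad_f = lambda x: Hf @ x
grad_g = lambda y: Hg @ y

# Illustrative (assumed) parameters, not the tuned ones from the paper.
eta_x = eta_y = 1e-3
alpha_x = alpha_y = beta_x = beta_y = 1.0
tau_x = tau_y = sigma_x = sigma_y = 0.5
theta = 0.9

x = np.zeros(dx); xf = x.copy()
y = np.zeros(dy); yf = y.copy(); y_prev = y.copy()

for k in range(20000):
    ym = y + theta * (y - y_prev)                 # line 5: extrapolation
    xg = tau_x * x + (1 - tau_x) * xf             # line 6
    yg = tau_y * y + (1 - tau_y) * yf             # line 7
    x_new = (x + eta_x * alpha_x * (xg - x)
             - eta_x * beta_x * A.T @ (A @ x - grad_g(yg))
             - eta_x * (grad_f(xg) + A.T @ ym))   # line 8
    y_new = (y + eta_y * alpha_y * (yg - y)
             - eta_y * beta_y * A @ (A.T @ y + grad_f(xg))
             - eta_y * (grad_g(yg) - A @ x_new))  # line 9
    xf = xg + sigma_x * (x_new - x)               # line 10
    yf = yg + sigma_y * (y_new - y)               # line 11
    y_prev, x, y = y, x_new, y_new

print(np.linalg.norm(x), np.linalg.norm(y))       # unique solution here is (0, 0)
```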

In this section we present the Accelerated Primal-Dual Gradient Method (APDG; Algorithm 1) for solving problem (1). First, we provide an outline of the key ideas used in the development of this algorithm.

# 4.1 Algorithm development strategy

First, we observe that problem (1) is equivalent to the problem of finding a zero of a sum of two monotone operators, $G_{1}, G_{2} \colon \mathbb{R}^{d_{x}} \times \mathbb{R}^{d_{y}} \to \mathbb{R}^{d_{x}} \times \mathbb{R}^{d_{y}}$, defined as

$$
G _ {1} \colon (x, y) \mapsto (\nabla f (x), \nabla g (y)), \tag {15}
$$

$$
G _ {2} \colon (x, y) \mapsto (\mathbf {A} ^ {\top} y, - \mathbf {A} x). \tag {16}
$$

Indeed, $G_{1}(x^{*},y^{*}) + G_{2}(x^{*},y^{*}) = 0$ is just another way to write the optimality conditions (13).

The Forward-Backward algorithm. A natural way to tackle this problem is via the Forward-Backward algorithm (Bauschke and Combettes, 2011), whose iterates have the form

$$
\left(x ^ {k + 1}, y ^ {k + 1}\right) = J _ {G _ {2}} \left(\left(x ^ {k}, y ^ {k}\right) - G _ {1} \left(x ^ {k}, y ^ {k}\right)\right), \tag {17}
$$

where the operator $J_{G_2}$ is the inverse of the operator $I + G_2$, and $I$ is the identity operator. Note that $J_{G_2}$ can be written as $J_{G_2} \colon (x, y) \mapsto (x^+, y^+)$, where $(x^+, y^+) \in \mathbb{R}^{d_x} \times \mathbb{R}^{d_y}$ is the solution of the linear system

$$
\left\{ \begin{array}{l} x ^ {+} = x - \mathbf {A} ^ {\top} y ^ {+}, \\ y ^ {+} = y + \mathbf {A} x ^ {+}. \end{array} \right. \tag {18}
$$
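
Concretely, eliminating $x^+$ from (18) shows that applying $J_{G_2}$ amounts to one linear solve:

$$
y ^ {+} = \left(\mathbf {I} + \mathbf {A} \mathbf {A} ^ {\top}\right) ^ {- 1} (y + \mathbf {A} x), \qquad x ^ {+} = x - \mathbf {A} ^ {\top} y ^ {+},
$$

which is exactly the per-iteration cost that the extrapolation step below is designed to avoid.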

Linear extrapolation step. Next, notice that the computation of the operator $J_{G_2}$ requires solving the linear system (18). This is expensive and has to be done at each iteration of the Forward-Backward algorithm. Let us instead consider the related problem

$$
\left\{ \begin{array}{l} x ^ {+} = x - \mathbf {A} ^ {\top} y _ {m}, \\ y ^ {+} = y + \mathbf {A} x ^ {+}, \end{array} \right. \tag {19}
$$

where $y_{m} \in \mathbb{R}^{d_{y}}$ is a newly introduced variable. It is easy to observe that (19) is equivalent to (18) when $y_{m} = y^{+}$. Next, notice that choosing $y_{m} = y$ makes (19) easy to solve. However, it turns out that the convergence analysis of an algorithm with this approximation may be challenging (Zhang et al., 2021a), especially if we want to combine it with other techniques, such as acceleration. Our key idea is to propose a better alternative: the linear extrapolation step

$$
y _ {m} = y + \theta \left(y - y ^ {-}\right), \tag {20}
$$

where $y^{-} \in \mathbb{R}^{d_y}$ corresponds to $y$ obtained from the previous iteration of the Forward-Backward algorithm, and $\theta \in (0,1]$ is an extrapolation parameter. The linear extrapolation step was introduced by Chambolle and Pock (2011) in the analysis of the Primal-Dual Hybrid Gradient algorithm<sup>6</sup>.

Nesterov acceleration. Next, we note that the operator $G_{1}$ is equal to the gradient of the (potential) function $(x,y)\mapsto f(x) + g(y)$. This function is smooth and convex due to Assumptions 1 and 2. This allows us to incorporate the Nesterov acceleration mechanism into the Forward-Backward algorithm. Nesterov acceleration is known to be a powerful tool which allows one to improve the convergence properties of gradient methods (Nesterov, 1983, 2003).

# 4.2 Convergence of the algorithm

We are now ready to study the convergence properties of Algorithm 1. We are interested in the case when the following condition holds:

$$
\min \left\{\max \left\{\mu_ {x}, \mu_ {y x} \right\}, \max \left\{\mu_ {y}, \mu_ {x y} \right\} \right\} > 0. \tag {21}
$$

In this case one can show that the solution set $\mathcal{S}$ of problem (1) is nonempty. Moreover, strong duality holds in this case, as captured by the following lemma.

Lemma 1. Let Assumptions 1, 2 and 3 and condition (21) hold. Let $p$ be the optimal value of the primal problem

$$
p = \min _ {x \in \mathbb {R} ^ {d _ {x}}} [ P (x) = f (x) + g ^ {*} (\mathbf {A} x) ], \tag {22}
$$

and let $d$ be the optimal value of the dual problem

$$
d = \max _ {y \in \mathbb {R} ^ {d _ {y}}} \left[ D (y) = - g (y) - f ^ {*} (- \mathbf {A} ^ {\top} y) \right]. \tag {23}
$$

Then $p = d$ is finite, and $(x^{*},y^{*})\in \mathcal{S}$ if and only if $x^{*}$ is a solution of the primal problem (22) and $y^{*}$ is a solution of the dual problem (23).

Under the aforementioned conditions, Algorithm 1 achieves linear convergence. That is, its iteration complexity is proportional to $\log \frac{1}{\epsilon}$.

Theorem 1. Let Assumptions 1, 2 and 3 and condition (21) hold. Then there exist parameters of Algorithm 1 such that its iteration complexity for finding an $\epsilon$-accurate solution of problem (1) is

$$
\mathcal {O} \left(\min \left\{T _ {a}, T _ {b}, T _ {c}, T _ {d} \right\} \log \frac {C}{\epsilon}\right), \tag {24}
$$

where $T_{a}, T_{b}, T_{c}, T_{d}$ are defined as

$$
T _ {a} = \max \left\{\sqrt {\frac {L _ {x}}{\mu_ {x}}}, \sqrt {\frac {L _ {y}}{\mu_ {y}}}, \frac {L _ {x y}}{\sqrt {\mu_ {x} \mu_ {y}}} \right\}, \qquad T _ {b} = \max \left\{\frac {\sqrt {L _ {x} L _ {y}}}{\mu_ {x y}}, \frac {L _ {x y}}{\mu_ {x y}} \sqrt {\frac {L _ {x}}{\mu_ {x}}}, \frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \right\},
$$

$$
T _ {c} = \max \left\{\frac {\sqrt {L _ {x} L _ {y}}}{\mu_ {y x}}, \frac {L _ {x y}}{\mu_ {y x}} \sqrt {\frac {L _ {y}}{\mu_ {y}}}, \frac {L _ {x y} ^ {2}}{\mu_ {y x} ^ {2}} \right\}, \qquad T _ {d} = \max \left\{\frac {\sqrt {L _ {x} L _ {y}} L _ {x y}}{\mu_ {x y} \mu_ {y x}}, \frac {L _ {x y} ^ {2}}{\mu_ {y x} ^ {2}}, \frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \right\},
$$

and $C > 0$ is some constant, which does not depend on $\epsilon$, but possibly depends on $L_x, \mu_x, L_y, \mu_y, L_{xy}, \mu_{xy}, \mu_{yx}$.

# 5 Discussion of Theorem 1 and Related Work

In this section we comment on the iteration complexity result for Algorithm 1 provided in Theorem 1. We consider important and illustrative special cases of this complexity result and draw connections with existing results in the literature.

# 5.1 Strongly convex and strongly concave case

In this case $\mu_x, \mu_y > 0$, and we can always take $\mu_{xy} = \mu_{yx} = 0$ in Assumption 3. Then, Algorithm 1 has iteration complexity given by

$$
\mathcal {O} \left(\max \left\{\sqrt {\frac {L _ {x}}{\mu_ {x}}}, \sqrt {\frac {L _ {y}}{\mu_ {y}}}, \frac {L _ {x y}}{\sqrt {\mu_ {x} \mu_ {y}}} \right\} \log \frac {1}{\epsilon}\right). \tag {25}
$$

This improves upon the current state-of-the-art results

$$
\tilde {\mathcal {O}} \left(\max \left\{\sqrt [ 4 ]{\frac {L _ {x} ^ {2} L _ {y}}{\mu_ {x} ^ {2} \mu_ {y}}}, \sqrt [ 4 ]{\frac {L _ {x} L _ {y} ^ {2}}{\mu_ {x} \mu_ {y} ^ {2}}}, \frac {L _ {x y}}{\sqrt {\mu_ {x} \mu_ {y}}} \right\} \log \frac {1}{\epsilon}\right) \tag {26}
$$

due to Xie et al. (2021), and

$$
\tilde {\mathcal {O}} \left(\max \left\{\sqrt {\frac {L _ {x}}{\mu_ {x}}}, \sqrt {\frac {L _ {y}}{\mu_ {y}}}, \sqrt {\frac {L _ {x y} L}{\mu_ {x} \mu_ {y}}} \right\} \log \frac {1}{\epsilon}\right), \tag {27}
$$

due to Wang and Li (2020), where $\tilde{\mathcal{O}} (\cdot)$ hides additional logarithmic factors, and $L = \max \{L_{x},L_{y},L_{xy}\}$. Moreover, our result (25) matches the lower complexity bound provided by Zhang et al. (2021b). Hence, Algorithm 1 is optimal in this regime.

Apart from our work, algorithms that achieve the optimal complexity (25) were developed in three independent works by Thekumparampil et al. (2022); Jin et al. (2022); Du et al. (2022). However, to the best of our knowledge these works were published or appeared on arXiv in 2022, while our work appeared on arXiv in 2021. Hence, Algorithm 1 is the first algorithm which achieves the lower complexity bound (25) for smooth and strongly-convex-strongly-concave saddle-point problems with bilinear coupling.

# 5.2 Affinely-constrained minimization case

In this case $\mu_x > 0$ and $\mu_y = 0$. First, we consider the case when $L_y = 0$, i.e., $g(y)$ is a linear function. Then, problem (1) is equivalent to the smooth and strongly-convex affinely-constrained minimization problem (7). Algorithm 1 enjoys the linear convergence rate

$$
\mathcal {O} \left(\max \left\{\frac {L _ {x y}}{\mu_ {x y}} \sqrt {\frac {L _ {x}}{\mu_ {x}}}, \frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \right\} \log \frac {1}{\epsilon}\right), \tag {28}
$$

where $\mu_{xy}^2 = \lambda_{\mathrm{min}}^{+}(\mathbf{A}\mathbf{A}^{\top}) > 0$ due to Assumption 3. This result recovers the complexity of the APAPC algorithm (Kovalev et al., 2020). It is possible to incorporate the Chebyshev acceleration mechanism (Arioli and Scott, 2014) into Algorithm 1 for solving problem (7) to obtain the improved complexity

$$
\mathcal {O} \left(\frac {L _ {x y}}{\mu_ {x y}} \sqrt {\frac {L _ {x}}{\mu_ {x}}} \log \frac {1}{\epsilon}\right). \tag {29}
$$

This matches the complexity of the OPAPC algorithm of Kovalev et al. (2020); Salim et al. (2021), which was shown to be optimal (Salim et al., 2021; Scaman et al., 2017).

# 5.3 Strongly convex and concave case

We also allow $L_{y} > 0$, i.e., the function $g(y)$ is a general, not necessarily linear, smooth and convex function. It is often possible that $\mu_{xy} > 0$ due to Assumption 3; for instance, when $\mathbf{A}$ is a full row rank matrix. Then, Algorithm 1 enjoys the following linear iteration complexity:

$$
\mathcal {O} \left(\max \left\{\frac {\sqrt {L _ {x} L _ {y}}}{\mu_ {x y}}, \frac {L _ {x y}}{\mu_ {x y}} \sqrt {\frac {L _ {x}}{\mu_ {x}}}, \frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \right\} \log \frac {1}{\epsilon}\right). \tag {30}
$$

This case was previously studied by Du and Hu (2019); Du et al. (2017); Zhang et al. (2021a). Du and Hu (2019) provided an analysis for an algorithm called Sim-GDA, and established its iteration complexity

$$
\mathcal {O} \left(\max \left\{\frac {L _ {x} ^ {3} L _ {y} L _ {x y} ^ {2}}{\mu_ {x} ^ {2} \mu_ {x y} ^ {4}}, \frac {L _ {x} ^ {3} L _ {x y} ^ {4}}{\mu_ {x} ^ {3} \mu_ {x y} ^ {4}} \right\} \log \frac {1}{\epsilon}\right). \tag {31}
$$

This result is substantially worse than our complexity (30), possibly due to a suboptimal analysis. Subsequently, Zhang et al. (2021a) provided an improved analysis of the Sim-GDA algorithm, obtaining the complexity

$$
\mathcal {O} \left(\max \left\{\frac {L ^ {3}}{\mu_ {x} \mu_ {x y} ^ {2}}, \frac {L ^ {2}}{\mu_ {x} ^ {2}} \right\} \log \frac {1}{\epsilon}\right). \tag {32}
$$

They also studied the Alt-GDA method, obtaining the complexity

$$
\mathcal {O} \left(\max \left\{\frac {L ^ {2}}{\mu_ {x y} ^ {2}}, \frac {L}{\mu_ {x}} \right\} \log \frac {1}{\epsilon}\right), \tag {33}
$$

where $L = \max \{L_x, L_y, L_{xy}\}$. However, these results are local, i.e., they are valid only if the initial iterates of these algorithms are close enough to the solution of problem (1). Moreover, these results are still worse than our rate (30) because Sim-GDA and Alt-GDA do not utilize the Nesterov acceleration mechanism, while our Algorithm 1 does.

# 5.4 Bilinear case

In this case $\mu_x = \mu_y = L_x = L_y = 0$. That is, the functions $f(x)$ and $g(y)$ are linear. Then, problem (1) turns into the bilinear min-max problem (10), and $\mu_{xy}^2 = \mu_{yx}^2 = \lambda_{\min}^+ (\mathbf{A}^\top \mathbf{A}) > 0$ due to Assumption 3. The iteration complexity of Algorithm 1 becomes

$$
\mathcal {O} \left(\frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \log \frac {1}{\epsilon}\right). \tag {34}
$$

This recovers the results of Daskalakis et al. (2018); Liang and Stokes (2019); Gidel et al. (2018, 2019); Mishchenko et al. (2020); Mokhtari et al. (2020) for the bilinear min-max problem (10). However, this result is worse than the complexity lower bound

$$
\mathcal {O} \left(\frac {L _ {x y}}{\mu_ {x y}} \log \frac {1}{\epsilon}\right), \tag {35}
$$

obtained in the work of Ibrahim et al. (2020), which was reached by Azizian et al. (2020); Du et al. (2022)<sup>7</sup>.

# 5.5 Convex-concave case

In this case $\mu_y = \mu_x = 0$. It is often possible that $\mu_{xy} = \mu_{yx} > 0$ due to Assumption 3, for example, when $\mathbf{A}$ is a square and full rank matrix. Then, the iteration complexity of Algorithm 1 becomes

$$
\mathcal {O} \left(\max \left\{\frac {\sqrt {L _ {x} L _ {y}} L _ {x y}}{\mu_ {x y} ^ {2}}, \frac {L _ {x y} ^ {2}}{\mu_ {x y} ^ {2}} \right\} \log \frac {1}{\epsilon}\right), \tag {36}
$$

which is still linear. This complexity result generalizes the result (34) for bilinear min-max problems, as it allows for general, not necessarily linear, convex and smooth functions $f(x)$ and $g(y)$. To the best of our knowledge, Algorithm 1 is the first algorithm which can achieve linear convergence for smooth, non-strongly-convex, non-strongly-concave min-max problems with bilinear coupling.

# Acknowledgements

The work of Alexander Gasnikov was supported by a grant for research centers in the field of artificial intelligence, provided by the Analytical Center for the Government of the Russian Federation in accordance with the subsidy agreement (agreement identifier 000000D730321P5Q0002) and the agreement with the Ivannikov Institute for System Programming of the Russian Academy of Sciences dated November 2, 2021, No. 70-2021-00142.
|
| 432 |
+
|
| 433 |
+
# References
Alkousa, M., Gasnikov, A., Dvinskikh, D., Kovalev, D., and Stonyakin, F. (2020). Accelerated methods for saddle-point problem. Computational Mathematics and Mathematical Physics, 60(11):1787-1809.
Arioli, M. and Scott, J. (2014). Chebyshev acceleration of iterative refinement. Numerical Algorithms, 66(3):591-608.
Arjevani, Y., Bruna, J., Can, B., Gurbuzbalaban, M., Jegelka, S., and Lin, H. (2020). IDEAL: Inexact decentralized accelerated augmented Lagrangian method. arXiv preprint arXiv:2006.06733.
Azizian, W., Scieur, D., Mitliagkas, I., Lacoste-Julien, S., and Gidel, G. (2020). Accelerating smooth games by manipulating spectral shapes. In International Conference on Artificial Intelligence and Statistics, pages 1705-1715. PMLR.
Bauschke, H. H. and Combettes, P. L. (2011). Convex Analysis and Monotone Operator Theory in Hilbert Spaces. Springer.
Chambolle, A. and Pock, T. (2011). A first-order primal-dual algorithm for convex problems with applications to imaging. Journal of Mathematical Imaging and Vision, 40(1):120-145.
Chambolle, A. and Pock, T. (2016). An introduction to continuous optimization for imaging. Acta Numerica, 25:161-319.
Cohen, M. B., Sidford, A., and Tian, K. (2020). Relative Lipschitzness in extragradient methods and a direct recipe for acceleration. arXiv preprint arXiv:2011.06572.
Daskalakis, C., Ilyas, A., Syrgkanis, V., and Zeng, H. (2018). Training GANs with optimism. In International Conference on Learning Representations (ICLR 2018).
Du, S. S., Chen, J., Li, L., Xiao, L., and Zhou, D. (2017). Stochastic variance reduction methods for policy evaluation. In International Conference on Machine Learning, pages 1049-1058. PMLR.
Du, S. S., Gidel, G., Jordan, M. I., and Li, C. J. (2022). Optimal extragradient-based bilinearly-coupled saddle-point optimization. arXiv preprint arXiv:2206.08573.
Du, S. S. and Hu, W. (2019). Linear convergence of the primal-dual gradient method for convex-concave saddle point problems without strong convexity. In The 22nd International Conference on Artificial Intelligence and Statistics, pages 196-205. PMLR.
Gidel, G., Berard, H., Vignoud, G., Vincent, P., and Lacoste-Julien, S. (2018). A variational inequality perspective on generative adversarial networks. arXiv preprint arXiv:1802.10551.
Gidel, G., Hemmat, R. A., Pezeshki, M., Le Priol, R., Huang, G., Lacoste-Julien, S., and Mitliagkas, I. (2019). Negative momentum for improved game dynamics. In The 22nd International Conference on Artificial Intelligence and Statistics, pages 1802-1811. PMLR.
Ibrahim, A., Azizian, W., Gidel, G., and Mitliagkas, I. (2020). Linear lower bounds and conditioning of differentiable games. In International Conference on Machine Learning, pages 4583-4593. PMLR.
Jin, Y., Sidford, A., and Tian, K. (2022). Sharper rates for separable minimax and finite sum optimization via primal-dual extragradient methods. arXiv preprint arXiv:2202.04640.
Keriven, N., Bourrier, A., Gribonval, R., and Pérez, P. (2018). Sketching for large-scale learning of mixture models. Information and Inference: A Journal of the IMA, 7(3):447-508.
Korpelevich, G. M. (1976). The extragradient method for finding saddle points and other problems. Matecon, 12:747-756.
Kovalev, D., Salim, A., and Richtárik, P. (2020). Optimal and practical algorithms for smooth and strongly convex decentralized optimization. Advances in Neural Information Processing Systems, 33.
Lei, Q., Yen, I. E.-H., Wu, C.-y., Dhillon, I. S., and Ravikumar, P. (2017). Doubly greedy primal-dual coordinate descent for sparse empirical risk minimization. In International Conference on Machine Learning, pages 2034-2042. PMLR.
Li, H., Lin, Z., and Fang, Y. (2020). Optimal accelerated variance reduced EXTRA and DIGing for strongly convex and smooth decentralized optimization. arXiv e-prints, pages arXiv-2009.
Liang, T. and Stokes, J. (2019). Interaction matters: A note on non-asymptotic local convergence of generative adversarial networks. In The 22nd International Conference on Artificial Intelligence and Statistics, pages 907-915. PMLR.
Lin, T., Jin, C., and Jordan, M. I. (2020). Near-optimal algorithms for minimax optimization. In Conference on Learning Theory, pages 2738-2779. PMLR.
Mishchenko, K., Kovalev, D., Shulgin, E., Richtárik, P., and Malitsky, Y. (2020). Revisiting stochastic extragradients. In International Conference on Artificial Intelligence and Statistics, pages 4573-4582. PMLR.
Mokhtari, A., Ozdaglar, A., and Pattathil, S. (2020). A unified analysis of extra-gradient and optimistic gradient methods for saddle point problems: Proximal point approach. In International Conference on Artificial Intelligence and Statistics, pages 1497-1507. PMLR.
Nedic, A., Olshevsky, A., and Shi, W. (2017). Achieving geometric convergence for distributed optimization over time-varying graphs. SIAM Journal on Optimization, 27(4):2597-2633.
Nesterov, Y. (2003). Introductory lectures on convex optimization: A basic course, volume 87. Springer Science & Business Media.
Nesterov, Y. and Scrimali, L. (2006). Solving strongly monotone variational and quasi-variational inequalities.
Nesterov, Y. E. (1983). A method for solving the convex programming problem with convergence rate $O(1/k^2)$. In Dokl. Akad. Nauk SSSR, volume 269, pages 543-547.
Peyré, G., Cuturi, M., et al. (2019). Computational optimal transport: With applications to data science. Foundations and Trends® in Machine Learning, 11(5-6):355-607.
Salim, A., Condat, L., Kovalev, D., and Richtárik, P. (2021). An optimal algorithm for strongly convex minimization under affine constraints. arXiv preprint arXiv:2102.11079.
Scaman, K., Bach, F., Bubeck, S., Lee, Y. T., and Massoulié, L. (2017). Optimal algorithms for smooth and strongly convex distributed optimization in networks. In International Conference on Machine Learning, pages 3027-3036. PMLR.
Thekumparampil, K. K., He, N., and Oh, S. (2022). Lifted primal-dual method for bilinearly coupled smooth minimax optimization. In International Conference on Artificial Intelligence and Statistics, pages 4281-4308. PMLR.
Wang, J. and Xiao, L. (2017). Exploiting strong convexity from data with primal-dual first-order algorithms. In International Conference on Machine Learning, pages 3694-3702. PMLR.
Wang, Y. and Li, J. (2020). Improved algorithms for convex-concave minimax optimization. arXiv preprint arXiv:2006.06359.
Xiao, L., Yu, A. W., Lin, Q., and Chen, W. (2019). DSCOVR: Randomized primal-dual block coordinate algorithms for asynchronous distributed optimization. The Journal of Machine Learning Research, 20(1):1634-1691.
Xie, G., Han, Y., and Zhang, Z. (2021). DIPPA: An improved method for bilinear saddle point problems. arXiv preprint arXiv:2103.08270.
Ye, H., Luo, L., Zhou, Z., and Zhang, T. (2020). Multi-consensus decentralized accelerated gradient descent. arXiv preprint arXiv:2005.00797.
Zargham, M., Ribeiro, A., Ozdaglar, A., and Jadbabaie, A. (2013). Accelerated dual descent for network flow optimization. IEEE Transactions on Automatic Control, 59(4):905-920.
Zhang, G., Wang, Y., Lessard, L., and Grosse, R. (2021a). Don't fix what ain't broke: Near-optimal local convergence of alternating gradient descent-ascent for minimax optimization. arXiv preprint arXiv:2102.09468.
Zhang, J., Hong, M., and Zhang, S. (2021b). On lower iteration complexity bounds for the convex concave saddle point problems. Mathematical Programming, pages 1-35.
Zhang, Y. and Lin, X. (2015). Stochastic primal-dual coordinate method for regularized empirical risk minimization. In International Conference on Machine Learning, pages 353-361. PMLR.
# Checklist
1. For all authors...

(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
(b) Did you describe the limitations of your work? [No]
(c) Did you discuss any potential negative societal impacts of your work? [No] This is a theoretical work with no foreseeable negative societal impact.
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]

2. If you are including theoretical results...

(a) Did you state the full set of assumptions of all theoretical results? [Yes] Assumptions 1 to 3.
(b) Did you include complete proofs of all theoretical results? [Yes] See the Appendix.

3. If you ran experiments...

(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [N/A]
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [N/A]
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [N/A]
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [N/A]

4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...

(a) If your work uses existing assets, did you cite the creators? [N/A]
(b) Did you mention the license of the assets? [N/A]
(c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]

5. If you used crowdsourcing or conducted research with human subjects...

(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a993fcc0b51c0e97c3404b37aa5dffe651bff3e9362696ce3878a16538eb82dc
size 375937
acceleratedprimaldualgradientmethodforsmoothandconvexconcavesaddlepointproblemswithbilinearcoupling/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f44bc769429930b01d83f4fa16005b3f331a4cec5c156bd84eb2b5d1c200499
size 534720
acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4b3ea2be7a2935f3fcc0b5bdbb3f60dec671d0835391f0ca53b8606a8aa8efb
size 84302
acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e5b2343ed1317e0dec5048de95c584f5bd024e273c71a8b553b8530fc5fa9f5
size 104767
acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/332a70d0-20c8-4f03-a505-ef6d7184b7b7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d62150fbb0864c58ab3bde8f910e273c05b5bae4f8d21e871348d6ac1725180
size 625082
acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/full.md
ADDED
@@ -0,0 +1,398 @@
# Accelerated Projected Gradient Algorithms for Sparsity Constrained Optimization Problems
Jan Harold Alcantara

Academia Sinica

Taipei, Taiwan

jan.harold.alcantara@gmail.com

Ching-pei Lee

Academia Sinica

Taipei, Taiwan

leechingpei@gmail.com
# Abstract
We consider the projected gradient algorithm for the nonconvex best subset selection problem that minimizes a given empirical loss function under an $\ell_0$ -norm constraint. Through decomposing the feasible set of the given sparsity constraint as a finite union of linear subspaces, we present two acceleration schemes with global convergence guarantees, one by same-space extrapolation and the other by subspace identification. The former fully utilizes the problem structure to greatly accelerate the optimization speed with only negligible additional cost. The latter leads to a two-stage meta-algorithm that first uses classical projected gradient iterations to identify the correct subspace containing an optimal solution, and then switches to a highly-efficient smooth optimization method in the identified subspace to attain superlinear convergence. Experiments demonstrate that the proposed accelerated algorithms are orders of magnitude faster than their non-accelerated counterparts as well as the state of the art.
# 1 Introduction
We consider the sparsity-constrained optimization problem in $\Re^n$ :
$$
\min _ {w \in A _ {s}} f (w), \tag {1}
$$
where $f$ is convex with $L$ -Lipschitz continuous gradient, $s \in \mathbb{N}$ , and $A_{s}$ is the sparsity set given by
$$
A _ {s} := \left\{w \in \Re^ {n}: \| w \| _ {0} \leq s \right\}, \tag {2}
$$
where $\| w\| _0$ denotes the $\ell_0$ -norm that indicates the number of nonzero components in $w$ . We further assume that $f$ is lower-bounded on $A_{s}$ .
A classical problem that fits in the framework of (1) is the best subset selection problem in linear regression [6, 20]. Given a response vector $y \in \Re^{m}$ and a design matrix of explanatory variables $X \in \Re^{m \times n}$ , traditional linear regression minimizes a least squares (LS) loss function
$$
f (w) = \| y - X w \| ^ {2} / 2. \tag {3}
$$
However, when the number of features $n$ is high or when there are significantly fewer instances $m$ than features $n$ (i.e., $m \ll n$ ), we often seek a linear model that selects only a subset of the explanatory variables that best predicts the outcome $y$ . Towards this goal, we can solve (1) with $f$ given by (3) to fit the training data while simultaneously selecting the best $s$ features. Indeed, such a sparse linear regression problem is fundamental in many scientific applications, such as high-dimensional statistical learning and signal processing [22]. The loss in (3) can be generalized to the following linear empirical risk to cover various machine learning tasks beyond regression
$$
f (w) = g (X w), \quad g (z) = \sum_ {i = 1} ^ {m} g _ {i} \left(z _ {i}\right), \tag {4}
$$
where $g$ is convex. Such a problem structure makes evaluations of the objective and its derivatives highly efficient, and such efficient computation is a key motivation for our algorithms for (1).
Related Works. The discontinuous cardinality constraint in (1) makes the problem difficult to solve. To make the optimization problem easier, a popular approach is to slightly sacrifice the quality of the solution (either by not strictly satisfying the sparsity-level constraint or by deteriorating the prediction performance) and use continuous surrogate functions for the $\ell_0$ -norm, which lead to a continuous nonlinear programming problem for which abundant algorithms are at our disposal. For instance, using a convex penalty surrogate such as the $\ell_1$ -norm in the case of LASSO [36], the problem (1) can be relaxed into a convex (unconstrained) one that can be efficiently solved by many algorithms. Other algorithms based on continuous nonconvex relaxations, such as the smoothly clipped absolute deviation [15] and the minimax concave penalty [41] regularizers, are also popular in scenarios with a higher level of noise and outliers in the data. However, for applications in which enforcing the constraints or getting the best prediction performance is of utmost importance, solving the original problem (1) is inevitable. (For a detailed review, we refer the interested reader to [11, Section 1].) Unfortunately, methods for (1) are not as well-studied as those for the surrogate problems. Moreover, existing methods are indeed still preliminary and too slow to be useful in the large-scale problems often faced in modern machine learning tasks.
In view of the present unsatisfactory status for scenarios that simultaneously involve high-volume data and need to get the best prediction performance, this work proposes efficient algorithms to directly solve (1) with large-scale data. To our knowledge, all the most popular algorithms that directly tackle (1) without the use of surrogates involve using the well-known projected gradient (PG) algorithm, at least as a major component [10-13, 3]. [10] proved linear convergence of the objective value with the LS loss function (3) for the iterates generated by PG under a scalable restricted isometry property, which also served as their tool to accelerate PG. However, given any problem instance, it is hard, if not computationally impossible, to verify whether the said property holds. On the other hand, [11] established global subsequential convergence to a stationary point for the iterates of PG on (1) without the need for such isometry conditions, and their results are valid for general loss functions $f$ beyond (3). While some theoretical guarantees are known, the practicality of PG for solving (1) remains a big problem in real-world applications as its empirical convergence speed tends to be slow. The PG approach is called iterative hard thresholding (IHT) in studies of compressed sensing [13] that mainly focuses on the LS case. To accelerate IHT, several approaches that alternate between a PG step and a subspace optimization step are also proposed [12, 3], but such methods mainly focus on the LS case and statistical properties, while their convergence speed is less studied from an optimization perspective. Recently, "acceleration" approaches for PG on general nonconvex regularized problems have been studied in [27, 37]. While their proposed algorithms are also applicable to (1), the obtained convergence speed for nonconvex problems is not faster than that of PG.
This work is inspired by our earlier work [1], which considered a much broader class of problems without requiring convexity or differentiability assumptions on $f$ , and hence obtained only much weaker convergence results, with barely any convergence rates, for such general problems.
Contributions. In this work, we revisit the PG algorithm for solving the general problem (1) and propose two acceleration schemes by leveraging the combinatorial nature of the $\ell_0$ -norm. In particular, we decompose the feasible set $A_s$ as the finite union of $s$ -dimensional linear subspaces, each representing a subset of the coordinates $\{1, \dots, n\}$ , as detailed in (7) of Section 2. Such subspaces are utilized in devising techniques to efficiently accelerate PG. Our first acceleration scheme is based on a same-space extrapolation technique: we conduct extrapolation only when two consecutive iterates $w_{k-1}$ and $w_k$ lie in the same subspace, and the step size for this extrapolation is determined by a spectral initialization combined with backtracking to ensure sufficient function decrease. This is motivated by the observation that for (4), the objective and derivatives at the extrapolated point can be inferred efficiently through a linear combination of $Xw_{k-1}$ and $Xw_k$ . The second acceleration technique starts with plain PG, and when consecutive iterates stay in the same subspace, it begins to alternate between a full PG step and a truncated Newton step in the subspace to obtain superlinear convergence with extremely low computational cost. Our main contributions are as follows:
1. We prove that PG for (1) is globally convergent to a local optimum with a local linear rate, improving upon the sublinear results of Bertsimas et al. [11]. We emphasize that our framework, like [11], is applicable to general loss functions $f$ satisfying the convexity and smoothness requirements, and therefore covers not only the classical sparse regression problem but also many other ones encompassed by the empirical risk minimization (ERM) framework.
2. By decomposing $A_{s}$ as the union of linear subspaces, we further show that PG is provably capable of identifying a subspace containing a local optimum of (1). By exploiting this property, we propose two acceleration strategies with practical implementation and convergence guarantees for the general problem class (1). Our acceleration provides both computational and theoretical advantages for convergence, and can in particular obtain superlinear convergence.
3. In comparison with existing acceleration methods for nonconvex problems [27, 37], this work provides new acceleration schemes with faster theoretical speeds (see Theorems 3.2 and 3.3), and beyond being applied to the classical PG algorithm, those schemes can also easily be combined with existing accelerated PG approaches to further make them converge even faster.
4. Numerical experiments exemplify the significant improvement in both iterations and running time brought by our acceleration methods, in particular over the projected gradient algorithm by [11] as well as the accelerated proximal gradient method for nonconvex problems proposed by [27].
This work is organized as follows. We review the projected gradient algorithm and prove its local linear convergence and subspace identification for arbitrary smooth loss functions in Section 2. In Section 3, we propose the acceleration schemes devised through decomposing the constraint set in (1) into subspaces of $\Re^n$ . Experiments in Section 4 then illustrate the effectiveness of the proposed acceleration techniques, and Section 5 concludes this work. All proofs, details of the experiment settings, and additional experiments are in the appendices.
# 2 Projected Gradient Algorithm
The projected gradient algorithm for solving (1) is given by the iterations
$$
w ^ {k + 1} \in T _ {\mathrm {P G}} ^ {\lambda} \left(w ^ {k}\right) := P _ {A _ {s}} \left(w ^ {k} - \lambda \nabla f \left(w ^ {k}\right)\right), \tag {5}
$$
where $P_{A_s}(w)$ denotes the projection of $w$ onto $A_s$ , which is set-valued because of the nonconvexity of $A_s$ . When $f$ is given by (3), global linear convergence of this algorithm under a restricted isometry condition is established in [10]. For a general convex $f$ with $L$ -Lipschitz continuous gradients, that is,
$$
\| \nabla f (w) - \nabla f \left(w ^ {\prime}\right) \| \leq L \| w - w ^ {\prime} \| \quad \forall w, w ^ {\prime} \in \Re^ {n}, \tag {6}
$$
the global subsequential convergence of (5) is proved in [11], but neither global nor local rates of convergence are provided. In this section, we present an alternative proof of global convergence and, more importantly, establish its local linear convergence.
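To fix ideas, the following is a minimal NumPy sketch of one iteration of (5), using the standard fact that an element of the set-valued projection onto $A_s$ is obtained by hard thresholding (keeping the $s$ largest-magnitude entries); the function names are ours, not from the paper.

```python
import numpy as np

def project_As(w, s):
    # One element of the set-valued projection P_{A_s}(w): keep the s
    # largest-magnitude entries of w and zero out the rest (ties broken arbitrarily).
    z = np.zeros_like(w)
    keep = np.argpartition(np.abs(w), -s)[-s:]
    z[keep] = w[keep]
    return z

def pg_step(w, grad_f, lam, s):
    # One projected gradient iteration (5), with step size lam in (0, 1/L).
    return project_As(w - lam * grad_f(w), s)
```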
A useful observation that we will utilize in the proofs of our coming convergence results is that the nonconvex set $A_{s}$ given by (2) can be decomposed as a finite union of subspaces in $\Re^n$ :
$$
A _ {s} = \bigcup_ {J \in \mathcal {J} _ {s}} A _ {J}, \quad A _ {J} := \operatorname {span} \left\{e _ {j}: j \in J \right\}, \quad \mathcal {J} _ {s} := \left\{J \subseteq \{1, 2, \dots , n \}: | J | = s \right\}, \tag {7}
$$
where $e_j$ is the $j$ th standard unit vector in $\Re^n$ . Throughout this paper, we assume that $\lambda \in (0,L^{-1})$ .
Theorem 2.1. Let $\{w^k\}$ be a sequence generated by (5). Then:
(a) (Subsequential convergence) Either $\{f(w^k)\}$ is strictly decreasing, or there exists $N > 0$ such that $w^k = w^N$ for all $k \geq N$ . In addition, any accumulation point $w^*$ of $\{w^k\}$ satisfies $w^* \in P_{A_s}(w^* - \lambda \nabla f(w^*))$ , and is hence a stationary point of (1).
(b) (Subspace identification and full convergence) There exists $N\in \mathbb{N}$ such that
$$
\left\{w ^ {k} \right\} _ {k = N} ^ {\infty} \subseteq \bigcup_ {J \in \mathcal {I} _ {w ^ {*}}} A _ {J}, \quad \mathcal {I} _ {w ^ {*}} := \left\{J \in \mathcal {J} _ {s}: w ^ {*} \in A _ {J} \right\} \tag {8}
$$
whenever $w^k \to w^*$ . In particular, if $T_{\mathrm{PG}}^\lambda(w^*)$ is a singleton for an accumulation point $w^*$ of $\{w^k\}$ , then $w^*$ is a local minimum for (1), $w^k \to w^*$ , and (8) holds.
(c) (Q-linear convergence) If $T_{\mathrm{PG}}^{\lambda}(w^{*})$ is a singleton for an accumulation point $w^{*}$ and $w \mapsto w - \lambda \nabla f(w)$ is a contraction over $A_J$ for all $J \in \mathcal{I}_{w^*}$ , then $\{w^k\}$ converges to $w^{*}$ at a Q-linear rate. In other words, there is $N_2 \in \mathbb{N}$ and $\gamma \in [0,1)$ such that
$$
\left\| w ^ {k + 1} - w ^ {*} \right\| \leq \gamma \left\| w ^ {k} - w ^ {*} \right\|, \quad \forall k \geq N _ {2}. \tag {9}
$$
It is well-known that an optimal solution of (1) is also a stationary point of it [8, Theorem 2.2], and therefore (a) proves the global subsequential convergence of PG to candidate solutions of (1). Consider $z^{*} \coloneqq w^{*} - \lambda \nabla f(w^{*})$ , and let $\tau$ be a permutation of $\{1, \dots, n\}$ such that $z_{\tau(1)}^{*} \geq z_{\tau(2)}^{*} \geq \dots \geq z_{\tau(n)}^{*}$ .
The requirement of $T_{\mathrm{PG}}^{\lambda}(w^{*})$ being a singleton in Theorem 2.1 (b) then simply means the mild condition of $z_{\tau(s)}^{*} > z_{\tau(s+1)}^{*}$ , which is almost always true in practice. The requirement for (c) can be fulfilled when $f$ confined to $A_J$ is strongly convex, even if $f$ itself is not. This often holds true in practice when $f$ is of the form (4) and we restrict $s$ in (1) to be smaller than the number of data instances $m$ , and is thus also mild. The existence of a stationary point is guaranteed when $\{w^k\}$ is a bounded sequence, which in turn often holds when $f$ is coercive on $A_J$ for each $J \in \mathcal{J}_s$ .
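This singleton condition is also easy to test numerically; a small sketch, assuming the projection ranks the entries of $z^*$ by magnitude (the helper below is ours):

```python
import numpy as np

def pg_map_is_singleton(w_star, grad_f, lam, s):
    # T_PG(w*) is a singleton iff the s-th and (s+1)-th largest magnitudes of
    # z* = w* - lam * grad f(w*) are strictly separated.
    z = np.sort(np.abs(w_star - lam * grad_f(w_star)))[::-1]
    return z[s - 1] > z[s]
```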
In comparison to existing results in [11, 2, 14], parts (b) and (c) of Theorem 2.1 are new. In particular, part (b) provides a full convergence result of a kind that usually requires stronger regularity assumptions such as the Kurdyka-Lojasiewicz (KL) condition [2, 14] (see also (21)), which demands that the objective value decrease proportionally with the minimum-norm subgradient in a neighborhood of the accumulation point; we instead need only the very mild singleton condition right at the accumulation point. Part (c) gives local linear convergence of the PG iterates even though the problem is nonconvex, while the rates in [14] require a KL condition and are measured in the objective value.
The following result further provides rates of convergence of the objective values even without the conventional KL assumption. The first rate below follows from [24].
Theorem 2.2. Let $\{w^k\}$ be a sequence generated by (5). If $w^k \to w^*$ , such as when $T_{\mathrm{PG}}^\lambda(w^*)$ is a singleton at an accumulation point $w^*$ of (5), then
$$
f \left(w ^ {k}\right) - f \left(w ^ {*}\right) = o \left(k ^ {- 1}\right). \tag {10}
$$
Moreover, under the hypothesis of Theorem 2.1 (c), the objective converges to $f(w^{*})$ R-linearly, i.e.,
$$
f \left(w ^ {k}\right) - f \left(w ^ {*}\right) = O \left(\exp (- k)\right). \tag {11}
$$
By using Theorem 2.1, we can also easily get rates faster than (10) under a version of the KL condition that is easier to understand and verify than those assumed in existing works. In particular, existing analyses require the KL condition to hold in a neighborhood in $\Re^n$ of an accumulation point, but we just need it to hold around $w^*$ within $A_J$ for the restriction $f|_{A_J}$ for each $J \in \mathcal{I}_{w^*}$ . These results are postponed to Theorem 3.2 in the next section as the PG method is a special case of our acceleration framework.
# 3 Accelerated methods
The main focus of this work is the new acceleration techniques proposed in this section, which come with solid convergence guarantees for the PG algorithm presented in the preceding section. Our techniques fully exploit the subspace identification property described by the inclusion (8), as well as the problem structure of (4), to devise efficient algorithms.
We emphasize that the two acceleration strategies described below can be combined, and they are widely applicable: they can be employed in other existing algorithms for (1) as long as those algorithms possess a property similar to (8).
# 3.1 Acceleration by extrapolation
Traditional extrapolation techniques are found in the realm of convex optimization to accelerate algorithms [9, 31] with guaranteed convergence improvements, but were often only adopted as heuristics in the nonconvex setting, until some recent works showed that theoretical convergence can also be achieved [27, 37]. However, unlike the convex case, these extrapolation strategies for nonconvex problems do not lead to a faster convergence rate, nor do they come with an intuitive justification. An extrapolation step proceeds by choosing a positive stepsize along the direction determined by two consecutive iterates. That is, given two iterates $w^{k-1}$ and $w^k$ , an intermediate point $z^k \coloneqq w^k + t_k(w^k - w^{k-1})$ for some stepsize $t_k \geq 0$ is first calculated before applying the original algorithmic map ( $T_{\mathrm{PG}}^{\lambda}$ in our case).<sup>2</sup>
Another popular acceleration scheme for gradient algorithms is the spectral approach pioneered by [5]. They take the differences of the gradients and of the iterates in two consecutive iterations to estimate the curvature at the current point, and use it to decide the step size for updating along the reversed gradient direction. It has been shown in [39] that equipping this step size with a backtracking procedure leads to significantly faster convergence for proximal gradient on regularized optimization problems, which includes our PG for (1) as a special case.
To describe our proposed double acceleration procedure that combines extrapolation and spectral techniques, we first observe that all PG iterates lie on $A_{s}$ , and that $A_{s}$ can be finitely decomposed as (7). When two consecutive iterates lie in the same subspace $A_{J}$ for some $J \in \mathcal{J}_s$ , within these two iterations we are actually conducting convex optimization. In this case, an extrapolation step within $A_{J}$ is reasonable because it will not violate the constraint, and acceleration can be expected from the improved rates of accelerated proximal gradient on convex problems in [9, 32]. Judging from Theorem 2.1 (b), the corresponding $J$ is also a candidate index set that belongs to $\mathcal{I}_{w^*}$ , so extrapolation within $A_{J}$ makes further sense. We set $t_k = 0$ to skip the extrapolation step if $d^{k}$ is not a descent direction for $f$ at $w^{k}$ . Otherwise, we start from some $\hat{t}_k > 0$ decided by the curvature information of $f$ , and then execute a backtracking linesearch along $d^{k} := w^{k} - w^{k-1}$ to set $t_k = \eta^i\hat{t}_k$ for the smallest integer $i \geq 0$ that provides sufficient descent
$$
f \left(w ^ {k} + t _ {k} d ^ {k}\right) \leq f \left(w ^ {k}\right) - \sigma t _ {k} ^ {2} \| d ^ {k} \| ^ {2}, \tag {12}
$$
given parameters $\eta, \sigma \in (0,1)$ . We then apply (5) to $z^k = w^k + t_k d^k$ to obtain $w^{k+1}$ .
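A minimal sketch of this extrapolation step, assuming $w^{k-1}$ and $w^k$ already lie in the same subspace and $\hat{t}_k$ has been initialized from (15) or (18); the names and the default parameter values are ours:

```python
def extrapolate(w_prev, w_curr, f, t_hat, eta=0.5, sigma=1e-4, max_backtracks=30):
    # Same-space extrapolation with the sufficient-decrease test (12):
    # accept t = eta^i * t_hat once f(w^k + t d^k) <= f(w^k) - sigma t^2 ||d^k||^2.
    d = w_curr - w_prev
    f_k = f(w_curr)
    d_norm_sq = float(d @ d)
    t = t_hat
    for _ in range(max_backtracks):
        if f(w_curr + t * d) <= f_k - sigma * t * t * d_norm_sq:
            return w_curr + t * d          # z^k = w^k + t_k d^k
        t *= eta
    return w_curr                          # no step accepted: z^k = w^k
```

The PG map $T_{\mathrm{PG}}^{\lambda}$ is then applied to the returned $z^k$.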
For the spectral initialization $\hat{t}_k$ for accelerating the convergence, instead of directly using approaches of [5, 39] that takes the reversed gradient as the update direction, we need to devise a different mechanism as our direction $d^k$ is not directly related to the gradient. We observe that for the stepsize
$$
\alpha_ {k} := \left\langle s ^ {k}, s ^ {k} \right\rangle / \left\langle s ^ {k}, r ^ {k} \right\rangle , \quad s ^ {k} := w ^ {k} - w ^ {k - 1}, \quad r ^ {k} := \nabla f \left(w ^ {k}\right) - \nabla f \left(w ^ {k - 1}\right) \tag {13}
$$
used in [5], the final update $-\alpha_{k}\nabla f(w^{k})$ is actually the minimizer of the following subproblem
$$
\min _ {d \in \Re^ {n}} \left\langle \nabla f (w ^ {k}), d \right\rangle + \| d \| ^ {2} / (2 \alpha_ {k}). \tag {14}
$$
By juxtaposing the above quadratic problem and the upper bound provided by the descent lemma [7, Lemma 5.7], we can view $\alpha_{k}^{-1}$ as an estimate of the local Lipschitz parameter that could be much smaller than $L$ but still guarantee descent of the objective. We thus follow this idea and decide $\hat{t}_k$ using this curvature estimate and the descent lemma by
$$
\hat {t} _ {k} = \underset {t \geq 0} {\arg \min } \left\langle \nabla f \left(w ^ {k}\right), t d ^ {k} \right\rangle + \left\| t d ^ {k} \right\| ^ {2} / (2 \alpha_ {k}) \quad \Leftrightarrow \quad \hat {t} _ {k} = - \left\langle \alpha_ {k} \nabla f \left(w ^ {k}\right), d ^ {k} \right\rangle / \left\| d ^ {k} \right\| ^ {2}. \tag {15}
$$
Another interpretation of (13) is that $\alpha_{k}^{-1}I$ also serves as an estimate of $\nabla^2 f(w^k)$ ,<sup>3</sup> and the objective in (14) is a low-cost approximation of the second-order Taylor expansion of $f$ . However, we notice that for problems in the form of (4) and with $d^{k}\in A_{J}$ , the exact second-order Taylor expansion
$$
f \left(w ^ {k} + t d ^ {k}\right) \approx f \left(w ^ {k}\right) + t \left\langle \nabla f \left(w ^ {k}\right), d ^ {k} \right\rangle + t ^ {2} \left\langle \nabla^ {2} f \left(w ^ {k}\right) d ^ {k}, d ^ {k} \right\rangle / 2 \tag {16}
$$
can be calculated efficiently. In particular, for (4) and any $d^{k}\in A_{J}$ , we get from $Xd^{k} = X_{\cdot ,J}d_{J}^{k}$ :
$$
\begin{array}{l} \nabla f (w ^ {k}) ^ {\top} d ^ {k} = \nabla g \left(X w ^ {k}\right) ^ {\top} \left(X _ {:, J} d _ {J} ^ {k}\right), \\ \left\langle \nabla^ {2} f (w ^ {k}) d ^ {k}, d ^ {k} \right\rangle = \left\langle X _ {:, J} d _ {J} ^ {k}, \nabla^ {2} g \left(X w ^ {k}\right) \left(X _ {:, J} d _ {J} ^ {k}\right) \right\rangle , \end{array} \tag {17}
$$
which can be calculated in $O(ms)$ time by computing $X_{:,J}d_J^k$ first. This $O(ms)$ cost is much cheaper than the $O(mn)$ one for evaluating the full gradient of $f$ needed in the PG step, so our extrapolation and spectral techniques add only negligible cost. Moreover, for our case of $d^{k} = w^{k} - w^{k - 1}$ , we can further reduce the cost of calculating $X_{:,J}d_J^k$ , and thus of (17), to $O(m)$ by recycling intermediate computational results needed in evaluating $f(w^{k})$ through $X_{:,J}d_J^k = Xw^k - Xw^{k - 1}$ . With such tricks for efficient computation, we therefore consider the more accurate approximation that lets $\hat{t}_k$ be the scalar minimizing the quadratic function on the right-hand side of (16) for problems in the form (4). That is, we use
$$
\hat {t} _ {k} := - \left\langle \nabla f \left(w ^ {k}\right), d ^ {k} \right\rangle / \left\langle \nabla^ {2} f \left(w ^ {k}\right) d ^ {k}, d ^ {k} \right\rangle . \tag {18}
$$
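A sketch of computing (18) in $O(m)$ time for $f(w) = g(Xw)$ with separable $g$, reusing the cached products $Xw^{k}$ and $Xw^{k-1}$; here `grad_g` and `hess_g_diag` are hypothetical stand-ins for $\nabla g$ and the diagonal of $\nabla^2 g$ (which is diagonal since $g$ is separable):

```python
def spectral_t_hat(Xw_curr, Xw_prev, grad_g, hess_g_diag):
    # X d^k comes for free once Xw^k and Xw^{k-1} are cached.
    Xd = Xw_curr - Xw_prev
    slope = grad_g(Xw_curr) @ Xd               # <grad f(w^k), d^k>, cf. (17)
    curv = Xd @ (hess_g_diag(Xw_curr) * Xd)    # <grad^2 f(w^k) d^k, d^k>, cf. (17)
    if slope >= 0 or curv <= 0:
        return 0.0                             # d^k is not a descent direction: skip
    return -slope / curv                       # t_hat from (18), before the safeguard (19)
```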
Finally, for both (18) and (15), we safeguard $\hat{t}_k$ by
$$
\hat {t} _ {k} \leftarrow P _ {[c _ {k} \alpha_ {\min }, c _ {k} \alpha_ {\max }]} \left(\hat {t} _ {k}\right) \tag {19}
$$
for some fixed $\alpha_{\mathrm{max}} \geq \alpha_{\mathrm{min}} > 0$ , where
$$
c _ {k} := \left\| \left(\nabla f \left(w ^ {k}\right)\right) _ {J} \right\| / \left(\zeta_ {k} \left\| d ^ {k} \right\|\right), \quad \zeta_ {k} := - \left\langle d ^ {k}, \nabla f \left(w ^ {k}\right) \right\rangle / \left(\left\| d ^ {k} \right\| \left\| \left(\nabla f \left(w ^ {k}\right)\right) _ {J} \right\|\right) \in (0, 1 ]. \tag {20}
$$
We also note that the low cost of evaluating $Xd^k$ is the key to making the backtracking in (12) practical, as each $f(w^{k} + \eta^{i}\hat{t}_{k}d^{k})$ can be calculated in $O(m)$ time through linear combinations of $Xw^{k}$ and $Xd^{k}$ . The above procedure is summarized in Algorithm 1, with global convergence guaranteed by Theorem 3.1. In Theorem 3.2, we establish its full convergence as well as its convergence rates under a KL condition at $w^{*}$ : there exist a neighborhood $U\subset \Re^n$ of $w^{*}$ , $\theta \in [0,1]$ , and $\kappa >0$ such that for every $J\in \mathcal{I}_{w^*}$ ,
$$
\left(f (w) - f \left(w ^ {*}\right)\right) ^ {\theta} \leq \kappa \| \left(\nabla f (w)\right) _ {J} \|, \quad \forall w \in A _ {J} \cap U. \tag {21}
$$
We denote by $n_k$ the number of successful extrapolation steps in the first $k$ iterations of Algorithm 1. The part of $\theta \in [0,1/2]$ with $f$ being convex in the last item of Theorem 3.2 is directly from the result of [25].
Theorem 3.1. Under the hypotheses of Theorem 2.1, any accumulation point of a sequence generated by Algorithm 1 is a stationary point.
Theorem 3.2. Consider either (5) or Algorithm 1 with $\eta, \sigma, \epsilon \in (0,1)$ , and $\alpha_{\max} \geq \alpha_{\min} > 0$ , and suppose that there is an accumulation point $w^*$ of the iterates at which the KL condition holds. Then $w^k \to w^*$ . Moreover, the following rates hold:
(a) If $\theta \in (1/2, 1)$ : $f(w^k) - f(w^*) = O((k + n_k)^{-1/(2\theta - 1)})$ .
(b) If $\theta \in (0,1 / 2]$ : $f(w^{k}) - f(w^{*}) = O(\exp (-(k + n_{k})))$ .
(c) If $\theta = 0$ , or $\theta \in [0,1/2]$ and $f$ is convex: there is $k_0 \geq 0$ such that $f(w^k) = f(w^*)$ for all $k \geq k_0$ .
We stress that convexity of $f$ is not required in Theorems 3.1 and 3.2, except in the second half of the last item of Theorem 3.2. There are several advantages of the proposed extrapolation strategy over existing ones in [27, 37]. The most obvious one is the faster rates in Theorem 3.2 over PG: each successful extrapolation step in our method contributes to the convergence speed, while existing methods only provide the same convergence speed as PG. Next, existing strategies only use prespecified step sizes without information from the given problem or the current progress, and they restrict such step sizes to be within [0, 1]. Our method, on the other hand, fully takes advantage of the function curvature and can allow for arbitrarily large step sizes to better decrease the objective. In fact, we often observe $t_k \gg 1$ in our numerical experiments. Moreover, our acceleration techniques utilize the nature of (7) and (4) to obtain a very efficient implementation for ERM problems, such that the per-iteration cost of Algorithm 1 is almost the same as that of PG, while the approach of [27] requires evaluating $f$ and $\nabla f$ at two points per iteration, and thus has twice the per-iteration cost.
A finite termination result similar to Theorem 3.2 (c) is presented in [29] under a Hölderian error bound that is closely related to the KL condition, but their result requires convexity of both the smooth term and the regularizer, so it is not applicable to (1), which involves a nonconvex constraint.
# 3.2 Subspace Identification
In line with the above discussion, we interpret (8) as a theoretical property guaranteeing that the iterates of the projected gradient algorithm (5) will eventually identify the subspaces $A_{J}$ that contain a candidate solution $w^{*}$ after a finite number of iterations. Consequently, the task of minimizing $f$ over the nonconvex set $A_{s}$ can be reduced to a convex optimization problem of minimizing $f$ over $A_{J}$ . Motivated by this, we present a two-stage algorithm described in Algorithm 2 that switches to a high-order method for smooth convex optimization after a candidate piece $A_{J}$ is identified, to obtain even faster convergence.
Algorithm 1: Accelerated projected gradient algorithm by extrapolation (APG)
1 Given an initial vector $w^0\in \Re^n$ and parameters $\epsilon ,\eta ,\sigma \in (0,1)$ , $\alpha_{\mathrm{max}}\geq \alpha_{\mathrm{min}} > 0$ , $\lambda \in (0,1 / L)$
2 for $k = 0,1,2,\ldots$ do
3 if $k > 0$ , $w^{k - 1}$ and $w^{k}$ activate the same $A_{J}$ , and $\zeta_k\geq \epsilon$ then
4 $d^{k}\gets w^{k} - w^{k - 1}$ , and compute $\hat{t}_{k}$ from (19) with either (15) or (18)
5 for $i = 0,1,\dots$ do
6 $t_k\gets \eta^i\hat{t}_k$
7 if (12) is satisfied then $z^k\gets w^k +t_kd^k$ , and break
8 else $z^k\gets w^k$
9 $w^{k + 1}\gets T_{\mathrm{PG}}^\lambda (z^k)$
Since $\nabla f$ is assumed to be Lipschitz continuous, the generalized Hessian of $f$ exists everywhere [19], so we may employ a semismooth Newton (SSN) method [35] with backtracking linesearch to get a faster convergence speed with low cost (details in Appendix A). In particular, we reduce the computation costs by considering the restriction of $f$ to the subspace $A_{J}$ , treating the coordinates not in $J$ as non-variables, so that the problem considered is indeed smooth and convex. As we cannot know a priori whether $\mathcal{I}_{w^*}$ is indeed identified, we adopt the approach implemented in [26, 28, 23] and consider it identified when $w^{k}$ activates the same $A_{J}$ for sufficiently many consecutive iterations. To further safeguard against optimizing over a wrong subspace, we also incorporate the idea of [38, 4, 28, 23] to periodically alternate back to a PG step (5) after switching to the SSN stage. A detailed description of this two-stage algorithm is in Algorithm 2.
In the following theorem, we show that superlinear convergence can be obtained for Algorithm 2 even if we take only one SSN step between every two steps of (5), in a simplified setting of twice-differentiability. We first introduce some additional notation. Given any $w \in A_J$ , we use $f_J(w_J) \coloneqq f(w)$ to denote the function that treats only the coordinates of $w$ in $J$ as variables and the remaining ones as constant zeros. We assume that the conditions of Theorem 2.1 (b) hold with $w^* \in A_s$ , and that $f$ is twice-differentiable in a neighborhood $U$ of $w^*$ with $\nabla^2 f_J$ Lipschitz continuous in $U$ and $\nabla^2 f_J(w^*)$ positive definite for all $J \in \mathcal{I}_{w^*}$ .
Theorem 3.3. Suppose that starting after $k \geq N$ and $P_{A_s}(w^k) \subset U$ , we conduct $t$ Newton steps between every two steps of (5) for $t \geq 1$ :
$$
w ^ {k, 0} \in P _ {A _ {s}} \left(w ^ {k}\right), \quad \left\{ \begin{array}{l} J \in \mathcal {I} _ {w ^ {k, 0}}, \\ w _ {J} ^ {k, j + 1} = w _ {J} ^ {k, j} - \nabla^ {2} f _ {J} \left(w _ {J} ^ {k, j}\right) ^ {- 1} \nabla f _ {J} \left(w _ {J} ^ {k, j}\right), \\ w _ {i} ^ {k, j + 1} = 0, \quad \forall i \notin J, \end{array} \right. \quad j = 0, \dots , t - 1, \quad w ^ {k + 1} \in T _ {\mathrm {P G}} ^ {\lambda} \left(w ^ {k, t}\right). \tag {22}
$$
Then $w^k \to w^*$ at a $Q$ -quadratic rate.
In practice, the linear system for obtaining the SSN step is only solved inexactly via a (preconditioned) conjugate gradient (PCG) method, and with suitable stopping conditions for PCG and proper algorithmic modifications such as those in [40, 30], superlinear convergence can still be obtained easily. Interested readers are referred to Appendix A for a more detailed description of our implementation.
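For the LS loss (3), the Newton system in (22) restricted to $A_J$ is just an $s \times s$ linear solve; below is a dense sketch for illustration only (as noted above, the actual implementation solves the system inexactly with PCG, and general losses would use the restricted generalized Hessian instead):

```python
import numpy as np

def subspace_newton_step_ls(w, J, X, y):
    # One Newton step of (22) within A_J for f(w) = ||y - Xw||^2 / 2,
    # assuming w is supported on the index set J and X_J^T X_J is positive definite.
    X_J = X[:, J]                               # m x s restriction of the design matrix
    grad_J = X_J.T @ (X_J @ w[J] - y)           # grad f_J(w_J)
    hess_J = X_J.T @ X_J                        # grad^2 f_J, constant for the LS loss
    w_new = w.copy()
    w_new[J] = w[J] - np.linalg.solve(hess_J, grad_J)
    return w_new
```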
# 4 Experiments
In this section, we conduct numerical experiments to demonstrate the accelerated techniques presented in Section 3. We employ Algorithm 1 (APG) with (18) to accelerate PG, and further accelerate APG by incorporating subspace identification described in Algorithm 2, which we denote by $\mathrm{APG + }.$ Comparisons with the extrapolated PG algorithm of Li and Lin [27], which we denote by PG-LL, are also presented. PG-LL is a state-of-the-art approach for nonconvex regularized optimization and thus suitable for (1). For $f$ in (1), we consider both LS (3) and logistic regression (LR)
$$
f (w) = \sum_ {i = 1} ^ {m} \log \left(1 + \exp \left(- y _ {i} x _ {i} ^ {\top} w\right)\right) + \mu \| w \| ^ {2} / 2, \tag {23}
$$
Algorithm 2: Accelerated projected gradient algorithm by subspace identification $(\mathrm{PG}+)$
1 Given an initial vector $w^0\in \Re^n$ and $S,t\in \mathbb{N}$ . Set Unchanged $\leftarrow 0$
2 for $k = 0,1,2,\ldots$ do
3 if $k > 0$ , and $w^{k - 1}$ and $w^{k}$ activate the same component of $A_{s}$ then
4 Let $J\in \mathcal{J}_s$ correspond to the activated component
5 Unchanged $\leftarrow$ Unchanged $+1$
6 else Unchanged $\leftarrow 0$
7 if Unchanged $\geq S$ then
8 $y^{k}\gets P_{A_{J}}(w^{k})$ and use $t$ steps of SSN described in Appendix A, starting from $y^{k}$ , to find $z^{k}$ that approximately minimizes $f|_{A_J}$
9 if SSN fails then $z^{k}\gets w^{k}$ and Unchanged $\leftarrow 0$
10 else $z^{k}\gets w^{k}$
11 $w^{k + 1}\gets T_{\mathrm{PG}}^{\lambda}(z^{k})$
where $(x_{i},y_{i})\in \Re^{n}\times \{-1,1\}$ , $i = 1,\dots ,m$ , are the training instances, and $\mu >0$ is a small regularization parameter added to make the logistic loss coercive.
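For concreteness, a sketch of evaluating (23) and its gradient through the single matrix-vector product $Xw$, in the $f(w) = g(Xw)$ form exploited by the acceleration steps of Section 3 (the helper is ours):

```python
import numpy as np

def logistic_value_and_grad(w, X, y, mu):
    # f(w) = sum_i log(1 + exp(-y_i x_i^T w)) + mu ||w||^2 / 2, with y_i in {-1, +1}
    # and the x_i^T stacked as the rows of X.
    margins = y * (X @ w)
    value = np.sum(np.logaddexp(0.0, -margins)) + 0.5 * mu * (w @ w)
    p = -y / (1.0 + np.exp(margins))            # p_i = -y_i / (1 + exp(y_i x_i^T w))
    return value, X.T @ p + mu * w
```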
The algorithms are implemented in MATLAB and tested with public datasets in Tables 2 and 3 in Appendix B. All algorithms compared start from $w^0 = 0$ and terminate when the first-order optimality condition
$$
\operatorname {Residual} (w) := \| w - P _ {A _ {s}} (w - \lambda \nabla f (w)) \| / (1 + \| w \| + \lambda \| \nabla f (w) \|) < \hat {\epsilon} \tag {24}
$$
is met for some given $\hat{\epsilon} > 0$ . Further settings and parameter details of our experiments are in Appendix B.
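A self-contained sketch of the stopping measure (24) (hard thresholding implements the projection, as in Section 2; names are ours):

```python
import numpy as np

def residual(w, grad_f, lam, s):
    # Scaled first-order optimality measure (24); the iterate is accepted
    # once this value drops below the tolerance epsilon-hat.
    g = grad_f(w)
    z = w - lam * g
    pz = np.zeros_like(z)
    keep = np.argpartition(np.abs(z), -s)[-s:]  # hard-thresholding projection onto A_s
    pz[keep] = z[keep]
    return np.linalg.norm(w - pz) / (1.0 + np.linalg.norm(w) + lam * np.linalg.norm(g))
```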
Comparisons of algorithms for large datasets. To fit the practical scenario of using (1), we specifically selected high-dimensional datasets with $n$ larger than $m$ . We conduct experiments with various $s$ to widely test the performance under different scenarios. In particular, we consider $s \in \{\lceil 0.01m\rceil ,\lceil 0.05m\rceil ,\lceil 0.1m\rceil \}$ on all data except for the largest dataset webspam, for which we set $s \in \{\lceil 0.001m\rceil ,\lceil 0.005m\rceil ,\lceil 0.01m\rceil \}$ . The results of the experiment with the smallest $s$ are summarized in Figure 1, and results of the other two settings of $s$ are in Appendix C.

|
| 287 |
+
(a) news20, $s = \lceil 0.01m \rceil$
|
| 288 |
+
|
| 289 |
+

|
| 290 |
+
(b) rcv1.binary, $s = \lceil 0.01m\rceil$
|
| 291 |
+
|
| 292 |
+

|
| 293 |
+
(c) webspan, $s = \lceil 0.001m\rceil$
|
| 294 |
+
|
| 295 |
+

|
| 296 |
+
Least square
|
| 297 |
+
(d) E2006-log1p, $s = \lceil 0.01m\rceil$
|
| 298 |
+
Figure 1: Experiment on sparse regularized LR and LS. We present time v.s. residual in (24).
|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
(e) E2006-tfidf, $s = \lceil 0.01m\rceil$
|
| 302 |
+
|
| 303 |
+
Evidently, the extrapolation procedure in APG provides a significant improvement in running time over the base algorithm PG, and further incorporating subspace identification as in $\mathrm{APG+}$ results in a very fast algorithm that outperforms PG and APG by orders of magnitude.
Table 1: Comparison of algorithms for (1) to meet (24) with $\hat{\epsilon} = 10^{-6}$ , with (23) and (3) and with sparsity levels $s_1 = \lceil 0.01m\rceil$ and $s_2 = \lceil 0.05m\rceil$ for all datasets except webspam, where $s_1 = \lceil 0.001m\rceil$ and $s_2 = \lceil 0.005m\rceil$ . CPU: CPU time in seconds. GE: number of gradient evaluations. In one iteration, PG, APG, and $\mathrm{APG+}$ need one gradient evaluation, while PG-LL and APG-LL+ need two. CG: number of Hessian-vector products in the PCG procedure for obtaining SSN steps. PA: prediction accuracy (for (23)). MSE: mean-squared error (for (3)). Time with * indicates that the algorithm was terminated after running 10000 iterations without satisfying (24).
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="4">s1</td><td colspan="4">s2</td></tr><tr><td>CPU</td><td>GE</td><td>CG</td><td>PA</td><td>CPU</td><td>GE</td><td>CG</td><td>PA</td></tr><tr><td rowspan="5">news20</td><td>PG</td><td>*738.7</td><td>10000</td><td>0</td><td>0.877</td><td>*728.9</td><td>10000</td><td>0</td><td>0.935</td></tr><tr><td>APG</td><td>151.7</td><td>1583</td><td>0</td><td>0.877</td><td>758.3</td><td>8428</td><td>0</td><td>0.923</td></tr><tr><td>APG+</td><td>5.0</td><td>52</td><td>63</td><td>0.853</td><td>16.1</td><td>171</td><td>67</td><td>0.923</td></tr><tr><td>PG-LL</td><td>366.7</td><td>4682</td><td>0</td><td>0.873</td><td>*1494.4</td><td>20000</td><td>0</td><td>0.922</td></tr><tr><td>APG-LL+</td><td>6.6</td><td>152</td><td>88</td><td>0.854</td><td>29.2</td><td>417</td><td>89</td><td>0.920</td></tr><tr><td rowspan="5">rcv1.binary</td><td>PG</td><td>*58.4</td><td>10000</td><td>0</td><td>0.937</td><td>*72.7</td><td>10000</td><td>0</td><td>0.951</td></tr><tr><td>APG</td><td>12.6</td><td>1120</td><td>0</td><td>0.935</td><td>82.4</td><td>6372</td><td>0</td><td>0.934</td></tr><tr><td>APG+</td><td>0.3</td><td>21</td><td>42</td><td>0.931</td><td>2.4</td><td>192</td><td>138</td><td>0.940</td></tr><tr><td>PG-LL</td><td>22.2</td><td>3638</td><td>0</td><td>0.935</td><td>72.1</td><td>8738</td><td>0</td><td>0.929</td></tr><tr><td>APG-LL+</td><td>0.6</td><td>99</td><td>49</td><td>0.930</td><td>4.9</td><td>626</td><td>236</td><td>0.939</td></tr><tr><td rowspan="5">webspam</td><td>PG</td><td>*18660.1</td><td>10000</td><td>0</td><td>0.964</td><td>*30776.2</td><td>10000</td><td>0</td><td>0.978</td></tr><tr><td>APG</td><td>19683.4</td><td>7682</td><td>0</td><td>0.981</td><td>7722.4</td><td>2008</td><td>0</td><td>0.991</td></tr><tr><td>APG+</td><td>248.3</td><td>75</td><td>88</td><td>0.969</td><td>695.4</td><td>164</td><td>57</td><td>0.991</td></tr><tr><td>PG-LL</td><td>9001.3</td><td>4720</td><td>0</td><td>0.972</td><td>10163.5</td><td>3098</td><td>0</td><td>0.990</td></tr><tr><td>APG-LL+</td><td>447.3</td><td>264</td><td>92</td><td>0.965</td><td>837.3</td><td>294</td><td>90</td><td>0.992</td></tr><tr><td></td><td></td><td>CPU</td><td>GE</td><td>CG</td><td>MSE</td><td>CPU</td><td>GE</td><td>CG</td><td>MSE</td></tr><tr><td rowspan="5">E2006-log1p</td><td>PG</td><td>*2998.6</td><td>10000</td><td>0</td><td>0.167</td><td>*3644.1</td><td>10000</td><td>0</td><td>0.161</td></tr><tr><td>APG</td><td>270.6</td><td>669</td><td>0</td><td>0.136</td><td>811.8</td><td>1757</td><td>0</td><td>0.133</td></tr><tr><td>APG+</td><td>19.5</td><td>40</td><td>49</td><td>0.141</td><td>105.6</td><td>222</td><td>124</td><td>0.132</td></tr><tr><td>PG-LL</td><td>*6049.8</td><td>20000</td><td>0</td><td>0.132</td><td>2696.0</td><td>7086</td><td>0</td><td>0.132</td></tr><tr><td>APG-LL+</td><td>41.2</td><td>142</td><td>38</td><td>0.142</td><td>107.5</td><td>326</td><td>100</td><td>0.138</td></tr><tr><td rowspan="5">E2006-tfidf</td><td>PG</td><td>*242.7</td><td>10000</td><td>0</td><td>0.152</td><td>*666.9</td><td>10000</td><td>0</td><td>0.152</td></tr><tr><td>APG</td><td>1.3</td><td>14</td><td>0</td><td>0.154</td><td>3.3</td><td>33</td><td>0</td><td>0.153</td></tr><tr><td>APG+</td><td>1.3</td><td>8</td><td>6</td><td>0.141</td><td>3.3</td><td>31</td><td>7</td><td>0.139</td></tr><tr><td>PG-LL</td><td>110.6</td><td>4440</td><td>0</td><td>0.152</td><td>304.8</td><td>4558</td><td>0</td><td>0.151</td></tr><tr><td>APG-LL+</td><td>1.7</td><td>34</td><td>6</td><td>0.141</td><td>3.7</td><td>47</td><td>7</td><td>0.139</td></tr></table>
|
| 308 |
+
|
| 309 |
+
cost of PG and APG are almost the same as argued in Section 3, we note that the convergence of APG in terms of iterations is also superior to that of PG.
|
| 310 |
+
|
| 311 |
+
We also report the required time and number of gradient evaluations (which is the main computation at each iteration) for the algorithms to drive (24) below $\hat{\epsilon} = 10^{-6}$ . For PG, APG, and $\mathrm{APG + }$ , one gradient evaluation is needed per iteration, so the number of gradient evaluations is equivalent to the iteration count. For PG-LL, two gradient evaluations are needed per iteration, so its cost is twice of other methods. We also report the prediction performance on the test data, and we in particular use the test accuracy for (23) and the mean-squared error for (3). Results for the two smaller $s$ are in Table 1 while that for the largest $s$ is in Appendix C. It is clear from the results in Table 1 that APG outperforms PG-LL for most of the test instances considered, while $\mathrm{APG + }$ is magnitudes faster than PG-LL. When we equip PG-LL with our acceleration techniques by replacing $T_{\mathrm{PG}}^{\lambda}$ in Algorithms 1 and 2 with the algorithmic map defining PG-LL, we can further speed up PG-LL greatly as shown under the name $\mathrm{APG - LL + }$ (see Table 1). We do not observe a method that consistently possesses the best prediction performance, as this is mainly affected by which local optima is found, while no algorithm is able to find the best local optima among all candidates. With no prediction performance degradation, we see that $\mathrm{APG + }$ and $\mathrm{APG - LL + }$ reduce the time needed to solve (1) to a level significantly lower than that of the state of the art.
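
For reference, the following is a minimal NumPy sketch of the baseline PG iteration that the accelerated variants build on. The function names are ours, the gradient oracle `grad` is a placeholder, and the stopping test is only a stand-in for the residual (24), which we approximate here by the scaled distance between consecutive iterates:

```python
import numpy as np

def hard_threshold(z, s):
    # Euclidean projection onto {x : ||x||_0 <= s}: keep the s largest-magnitude entries.
    x = np.zeros_like(z)
    idx = np.argpartition(np.abs(z), -s)[-s:]
    x[idx] = z[idx]
    return x

def projected_gradient(grad, x0, s, step, tol=1e-6, max_iter=10000):
    # Baseline PG for min f(x) s.t. ||x||_0 <= s, with a fixed step size.
    x = hard_threshold(x0, s)
    for _ in range(max_iter):
        x_new = hard_threshold(x - step * grad(x), s)
        if np.linalg.norm(x_new - x) / step <= tol:  # stand-in for the residual (24)
            return x_new
        x = x_new
    return x
```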

In Appendix C.3, we demonstrate the effect on prediction performance of varying the residual level in (24) and illustrate that a tight residual level is indeed required to obtain better predictions. A comparison with a greedy method is shown in Appendix C.4.

Transition Plots. To demonstrate the behavior of the algorithms for increasing values of $s$, we fit the smaller datasets in Table 3 using the logistic loss (23) and the least squares loss (3) for varying $s = \lceil km \rceil$, where $k = 0.2, 0.4, 0.6, \ldots, 3$. The transition plots are presented in Figure 2; note that the time axis is in log scale.

We can see clearly that APG+ and APG-LL+ are consistently orders of magnitude faster than the baseline PG method across all sparsity levels. The same-subspace extrapolation scheme of APG is consistently faster than PG and APG-LL but slower than the two Newton acceleration schemes, with performance sometimes closer to APG+/APG-LL+ and sometimes closer to PG. APG-LL tends to outperform PG in most situations as well, but in several cases on the least squares problem, especially when $s$ is small, it can be slower than PG. Overall, the results in the transition plots show that our proposed acceleration schemes are indeed effective at all sparsity levels tested.

Sparse regularized logistic regression

![](images/5f9cfa0b1e56757e88be16c2bd8a1b9983d07e7b95a63b3ee51dc9a42a47e.jpg)
(a) colon-cancer

![](images/30ab30cd5e4b224d291e31c7d9ff3e13c18e770bcf24b9e85404ae9ebdcbaed6.jpg)
(b) duke

![](images/ab3b27e1503b2b54a8d9da58ccb057c74896ea967daf00a8d0159943dbb7f96e.jpg)
(c) gisette_scale

![](images/0e67f3b01000d477f5a689bcebcae19a35653278705202c632effe2ec9f65648.jpg)
(d) leukemia

Sparse least squares regression

![](images/c266264ad7305eb05c58d96f5b55b7dc5a9746dcae91dcdcbbcbcd69c29b5739.jpg)
(e) colon-cancer

![](images/d0f8ce5f1735e525b5ea2d50a136c0248ce1cbb68bdc119096149aae1a63a9c9.jpg)
(f) duke

![](images/ccd06667438e047bf9f57cd4f2cd34e15c57a1cbe1a744a8ff771b42bd71f761.jpg)
(g) gisette_scale

![](images/1f5bd55ba4a1b8bd9d171ffd459d329ac4b55a62fb8596f0a73dbdc2a2d64a8c.jpg)
(h) leukemia

Figure 2: Transition plots. We present sparsity levels versus running time (in log scale). Top row: logistic loss. Bottom row: least squares loss.

# 5 Conclusions

In this work, we revisited the projected gradient algorithm for solving $\ell_0$-norm constrained optimization problems. Through a natural decomposition of the constraint set into subspaces and the proven ability of the projected gradient method to identify a subspace containing a solution, we proposed effective acceleration schemes with provable improvements in convergence speed. Experiments showed that our acceleration strategies significantly improve both the convergence speed and the running time of the original projected gradient algorithm, and outperform the state of the art for $\ell_0$-norm constrained problems by a large margin. We plan to extend our analysis and algorithms to the setting of a nonconvex objective in the near future.

# Acknowledgments

This work was supported in part by Academia Sinica Grand Challenge Program Seed Grant No. AS-GCS-111-M05 and NSTC of R.O.C. grants 109-2222-E-001-003 and 111-2628-E-001-003.

# References

[1] Jan Harold Alcantara and Ching-pei Lee. Global convergence and acceleration of fixed point iterations of union upper semicontinuous operators: proximal algorithms, alternating and averaged nonconvex projections, and linear complementarity problems, 2022. arXiv:2202.10052. 2

[2] Hédy Attouch, Jérôme Bolte, and Benar Fux Svaiter. Convergence of descent methods for semi-algebraic and tame problems: proximal algorithms, forward-backward splitting, and regularized Gauss-Seidel methods. Mathematical Programming, 137(1):91-129, 2013. 4, 31, 32

[3] Sohail Bahmani, Bhiksha Raj, and Petros T. Boufounos. Greedy sparsity-constrained optimization. Journal of Machine Learning Research, 14:807-841, 2013. 2, 25

[4] Gilles Bareilles, Franck Iutzeler, and Jérôme Malick. Newton acceleration on manifolds identified by proximal-gradient methods. Technical report. arXiv:2012.12936. 7, 33

[5] Jonathan Barzilai and Jonathan M. Borwein. Two-point step size gradient methods. IMA Journal of Numerical Analysis, 8:141-148, 1988. 5

[6] E. M. L. Beale, M. G. Kendall, and D. W. Mann. The discarding of variables in multivariate analysis. Biometrika, 54(3-4):357-366, 1967. 1

[7] Amir Beck. First-Order Methods in Optimization. SIAM - Society for Industrial and Applied Mathematics, Philadelphia, PA, United States, 2017. 5, 27, 28

[8] Amir Beck and Yonina C. Eldar. Sparsity constrained nonlinear optimization: optimality conditions and algorithms. SIAM Journal on Optimization, 23(3):1480-1509, 2013. 4

[9] Amir Beck and Marc Teboulle. A fast iterative shrinkage-thresholding algorithm for linear inverse problems. SIAM Journal on Imaging Sciences, 2(1):183-202, 2009. 4, 5

[10] Amir Beck and Marc Teboulle. A linearly convergent algorithm for solving a class of nonconvex/affine feasibility problems. In H. H. Bauschke, R. S. Burachik, P. L. Combettes, V. Elser, D. R. Luke, and H. Wolkowicz, editors, Fixed-Point Algorithms for Inverse Problems in Science and Engineering, volume 49 of Springer Optimization and Its Applications, pages 33-48. Springer, New York, NY, 2011. 2, 3

[11] Dimitris Bertsimas, Angela King, and Rahul Mazumder. Best subset selection via a modern optimization lens. Annals of Statistics, 44(2):813-852, 2016. 2, 3, 4, 16

[12] Thomas Blumensath. Accelerated iterative hard thresholding. Signal Processing, 92:752-756, 2012. 2

[13] Thomas Blumensath and Mike E. Davies. Iterative hard thresholding for compressed sensing. Applied and Computational Harmonic Analysis, 27:265-274, 2009. 2

[14] Jérôme Bolte, Shoham Sabach, Marc Teboulle, and Yakov Vaisbourd. First order methods beyond convexity and Lipschitz gradient continuity with applications to quadratic inverse problems. SIAM Journal on Optimization, 28(3):2131-2151, 2018. 4

[15] Jianqing Fan and Runze Li. Variable selection via nonconcave penalized likelihood and its oracle properties. Journal of the American Statistical Association, 96(456):1348-1360, 2001. 2

[16] Leonardo Galli and Chih-Jen Lin. A study on truncated Newton methods for linear classification. IEEE Transactions on Neural Networks and Learning Systems, 2021. 15

[17] Jun-ya Gotoh, Akiko Takeda, and Katsuya Tono. DC formulations and algorithms for sparse optimization problems. Mathematical Programming, 169(1):141-176, 2018. 2

[18] Robert Hesse, D. Russell Luke, and Patrick Neumann. Alternating projections and Douglas-Rachford for sparse affine feasibility. IEEE Transactions on Signal Processing, 62:4868-4881, 2014. 30

[19] Jean-Baptiste Hiriart-Urruty, Jean-Jacques Strodiot, and V. Hien Nguyen. Generalized Hessian matrix and second-order optimality conditions for problems with $C^{1,1}$ data. Applied Mathematics & Optimization, 11(1):43-56, 1984. 5, 7, 14

[20] Ronald R. Hocking and R. N. Leslie. Selection of the best subset in regression analysis. Technometrics, 9(4):531-540, 1967. 1

[21] Chih-Yang Hsia, Wei-Lin Chiang, and Chih-Jen Lin. Preconditioned conjugate gradient methods in truncated Newton frameworks for large-scale linear classification. In Asian Conference on Machine Learning, pages 312-326, 2018. 14

[22] Prateek Jain and Purushottam Kar. Non-convex optimization for machine learning. Foundations and Trends in Machine Learning, 10(3-4):142-363, 2017. 1

[23] Ching-pei Lee. Accelerating inexact successive quadratic approximation for regularized optimization through manifold identification. Technical report, 2020. arXiv:2012.02522. 7

[24] Ching-pei Lee and Stephen J. Wright. First-order algorithms converge faster than $O(1/k)$ on convex problems. In Proceedings of the International Conference on Machine Learning, 2019. 4, 29

[25] Ching-pei Lee and Stephen J. Wright. Revisiting superlinear convergence of proximal Newton methods to degenerate solutions. Technical report, 2022. 6, 33

[26] Sangkyun Lee and Stephen J. Wright. Manifold identification in dual averaging for regularized stochastic online learning. Journal of Machine Learning Research, 13:1705-1744, 2012. 7

[27] Huan Li and Zhouchen Lin. Accelerated proximal gradient methods for nonconvex programming. In Advances in Neural Information Processing Systems, volume 28, 2015. 2, 3, 4, 6, 7

[28] Yu-Sheng Li, Wei-Lin Chiang, and Ching-pei Lee. Manifold identification for ultimately communication-efficient distributed optimization. In Proceedings of the 37th International Conference on Machine Learning, 2020. 7

[29] Mingrui Liu and Tianbao Yang. Adaptive accelerated gradient converging method under Hölderian error bound condition. In Advances in Neural Information Processing Systems, volume 30, 2017. 6

[30] Boris S. Mordukhovich, Xiaoming Yuan, Shangzhi Zeng, and Jin Zhang. A globally convergent proximal Newton-type method in nonsmooth convex optimization. Mathematical Programming, 2022. Online first. 7, 15

[31] Yurii Nesterov. A method for unconstrained convex minimization problem with the rate of convergence $O(1/k^2)$. Soviet Mathematics Doklady, 27(2):372-376, 1983. 4

[32] Yurii E. Nesterov. Gradient methods for minimizing composite functions. Mathematical Programming, 140(1):125-161, 2013. 5

[33] Jorge Nocedal and Stephen J. Wright. Numerical Optimization. Springer, New York, NY, USA, second edition, 2006. 14, 33

[34] Boris T. Polyak. Introduction to Optimization. Translation Series in Mathematics and Engineering. Optimization Software, New York, 1987. 32

[35] Liqun Qi and Jie Sun. A nonsmooth version of Newton's method. Mathematical Programming, 58(1-3):353-367, 1993. 7

[36] Robert Tibshirani. Regression shrinkage and selection via the lasso. Journal of the Royal Statistical Society Series B, 58(1):267-288, 1996. 2

[37] Bo Wen, Xiaojun Chen, and Ting Kei Pong. A proximal difference-of-convex algorithm with extrapolation. Computational Optimization and Applications, 69:297-324, 2018. 2, 3, 4, 6

[38] Stephen J. Wright. Accelerated block-coordinate relaxation for regularized optimization. SIAM Journal on Optimization, 22(1):159-186, 2012. 7

[39] Stephen J. Wright, Robert D. Nowak, and Mário A. T. Figueiredo. Sparse reconstruction by separable approximation. IEEE Transactions on Signal Processing, 57(7):2479-2493, 2009. 5

[40] Man-Chung Yue, Zirui Zhou, and Anthony Man-Cho So. A family of inexact SQA methods for non-smooth convex minimization with provable convergence guarantees based on the Luo-Tseng error bound property. Mathematical Programming, 174(1-2):327-358, 2019. 7, 15

[41] Cun-Hui Zhang. Nearly unbiased variable selection under minimax concave penalty. The Annals of Statistics, 38(2):894-942, 2010. 2
acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61df8e45e231cc1825f16777e6e513bbc85c69832c7b880d43284c3c71d041d9
size 464336

acceleratedprojectedgradientalgorithmsforsparsityconstrainedoptimizationproblems/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cd1ccd538171419378f06ff069ad18bf714e573523394bcfddc86414bf9dc829
size 610871

acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d11e54c463d5670efe9d704b648815de842c5dfa7a7ee6a65dc79c7398a9bf61
size 74243

acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dafa07fb0906c4f930046c6660467d439fadd584be767ad5a6ad6b620c0f3cce
size 94884

acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/99deec50-a70f-4f8a-9b8c-7aa43a2634dd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:984162ea546c654867de596169718286f4b5d9c74786e6b4109c8179ef2c7338
size 2778428

acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/full.md
ADDED
@@ -0,0 +1,339 @@

# Accelerated Training of Physics-Informed Neural Networks (PINNs) using Meshless Discretizations

Ramansh Sharma

Department of Computer Science and Engineering, SRM Institute of Science and Technology, India rs7146@srmist.edu.in

Varun Shankar

School of Computing, University of Utah, UT, USA

shankar@cs.utah.edu

# Abstract

Physics-informed neural networks (PINNs) are neural networks trained by using physical laws in the form of partial differential equations (PDEs) as soft constraints. We present a new technique for the accelerated training of PINNs that combines modern scientific computing techniques with machine learning: discretely-trained PINNs (DT-PINNs). The repeated computation of the partial derivative terms in the PINN loss functions via automatic differentiation during training is known to be computationally expensive, especially for higher-order derivatives. DT-PINNs are trained by replacing these exact spatial derivatives with high-order accurate numerical discretizations computed using meshless radial basis function-finite differences (RBF-FD) and applied via sparse-matrix vector multiplication. While in principle any high-order discretization may be used, the use of RBF-FD allows for DT-PINNs to be trained even on point cloud samples placed on irregular domain geometries. Additionally, though traditional PINNs (vanilla-PINNs) are typically stored and trained in 32-bit floating-point (fp32) on the GPU, we show that for DT-PINNs, using fp64 on the GPU leads to significantly faster training times than fp32 vanilla-PINNs with comparable accuracy. We demonstrate the efficiency and accuracy of DT-PINNs via a series of experiments. First, we explore the effect of network depth on both numerical and automatic differentiation of a neural network with random weights and show that RBF-FD approximations of third-order accuracy and above are more efficient while being sufficiently accurate. We then compare the DT-PINNs to vanilla-PINNs on both linear and nonlinear Poisson equations and show that DT-PINNs achieve similar losses with 2-4x faster training times on a consumer GPU. Finally, we also demonstrate that similar results can be obtained for the PINN solution to the heat equation (a space-time problem) by discretizing the spatial derivatives using RBF-FD and using automatic differentiation for the temporal derivative. Our results show that fp64 DT-PINNs offer a superior cost-accuracy profile to fp32 vanilla-PINNs, opening the door to a new paradigm of leveraging scientific computing techniques to support machine learning.

# 1 Introduction

Partial differential equations (PDEs) provide a convenient framework to model a large number of phenomena across science and engineering. In real-world scenarios, PDEs are typically challenging or impossible to solve using analytical techniques, and must instead be approximately solved using a numerical method. A variety of numerical methods to solve these PDEs have been developed, including but not limited to finite difference (FD) methods (19) (which work primarily on rectangular domains partitioned into Cartesian grids) and finite element (FE) methods (36) (which work on domains with curved boundaries but require partitioning the domain into multidimensional simplices). A modern class of numerical methods called meshless or meshfree methods generalizes finite difference methods in such a way as to remove the dependence on Cartesian grids, thereby allowing for the numerical solution of PDEs on point clouds. Of these, radial basis function-finite differences (RBF-FD) are among the most popular and widely-used (3; 5; 37; 8; 9; 1; 11; 12; 13; 10; 25; 26; 14; 33; 18), though a host of other such methods also exist. Much like FD or FE methods, these meshless methods can also approximate solutions to a desired order of accuracy.

More recently, PDE solvers based on machine learning (ML) have begun to gain in popularity due to the inherent ability of ML techniques such as neural networks (NNs) to recover highly complicated functions from data specified at arbitrary locations (15; 20). We focus on a popular class of ML-based meshless methods called physics-informed neural networks (PINNs) (27). PINNs can be used both to discover/infer PDEs that govern a given data set, and as direct PDE solvers. Our focus in this work is on the latter problem, though our techniques extend straightforwardly to inferring PDEs as well. PINNs are typically multilayer feedforward deep NNs (DNNs) that are trained using PDEs and boundary conditions as soft constraints, leveraging automatic differentiation (autograd) for computing derivatives appearing in the PDE terms. The original PINNs, often referred to as vanilla-PINNs, are challenging to train, at least partly because PDE-based constraints lead to complicated loss landscapes (17). These issues are somewhat ameliorated by using domain decomposition (X-PINNs) (16) or gradient-enhanced training (G-PINNs) (38). Other approaches for ameliorating these issues involve curriculum training or sequence-to-sequence learning (17). Many of these extensions can also help improve training and test accuracy. Much like other DNNs, PINNs are typically trained in 32-bit floating-point (i.e., fp32 or single precision).

In this work, we introduce a new technique for accelerating the training of vanilla-PINNs. Our technique relies on two key features: (a) using RBF-FD to compute highly accurate (nevertheless approximate) spatial derivatives in place of autograd, and (b) training the DNN in fp64 rather than fp32. These new discretely-trained PINNs (DT-PINNs) can be trained significantly faster than fp32 vanilla-PINNs on consumer desktop GPUs with no loss in accuracy or change in DNN architecture. The use of RBF-FD allows DT-PINNs to retain the meshless nature of vanilla-PINNs, thereby allowing for the solution of PDEs on domains with curved boundaries. As RBF-FD uses sparse-matrix vector multiplication (SpMV) to approximate the derivatives, DT-PINNs are also parallelizable on modern GPU architectures. It is important to note that DT-PINNs use autograd for the actual optimization of the PINN weights; only PDE derivatives are discretized using RBF-FD.

The NN literature does contain efforts to replace automatic differentiation with numerical differentiation. For instance, recent work showed that FD approximations can be efficient for learning generative models via score matching (23). Another example is an NN architecture that involves learning FD-like filters for faster prediction of PDEs (35). In the PINN literature, fractional-PINNs (F-PINNs) use numerical differentiation as autograd cannot compute fractional derivatives (22). Nevertheless, to the best of our knowledge, ours is the first work on using meshless high-order accurate FD-like methods in conjunction with PINNs, allowing them to be trained without any loss in accuracy on domains with curved boundaries (just as autograd does). An alternative would involve eliminating autograd inefficiencies via Taylor-mode differentiation (4). However, we show that at least part of the speedups observed in DT-PINNs is because numerical differentiation results in training completing in fewer epochs than if autograd were to be used.

To alleviate concerns about replacing autograd with RBF-FD, we first compare fp64 RBF-FD approximation of different orders of accuracy against fp32 autograd for DNNs and show the cost benefits of using higher-order accurate RBF-FD. Then, to illustrate the features of DT-PINNs, we focus for brevity on two purely spatial PDEs (the nonlinear and linear Poisson equations) and one space-time PDE (the heat equation). We use these settings to compare DT-PINNs and vanilla-PINNs for relative errors, timings, and speedups on a simple desktop GPU. We demonstrate through our experiments that DT-PINNs offer a superior cost-accuracy profile over vanilla-PINNs.

The remainder of this paper is organized as follows. In Section 2, we review both vanilla-PINNs and RBF-FD. Next, in Section 3, we discuss how to train DT-PINNs to solve both the Poisson and heat equations. Then, in Section 4, we present experimental results comparing RBF-FD and autograd, and comparing DT-PINNs against vanilla-PINNs on the Poisson and heat equations. We summarize our results and discuss possible future work in Section 5. Finally, the appendix contains additional results, code snippets, and key implementation details.

Notation: We use $x$ to refer to spatial coordinates in $d$ dimensions. On the other hand, a bolded quantity such as $\mathbf{c}$ or $\mathbf{u}$ indicates a vector with more than $d$ elements (an array). Finally, the $\sim$ symbol on top of a quantity indicates that the quantity is an approximation.

Broader Impacts: To the best of the authors' knowledge, there are no negative societal impacts of our work including potential malicious or unintended uses, environmental impact, security, or privacy concerns.

# 2 Review

We now provide a brief mathematical review of both vanilla-PINNs and RBF-FD discretizations. Unless we note otherwise, all derivatives in this section are spatial or temporal. We focus on three prototypical PDEs: the nonlinear Poisson equation, the linear Poisson equation, and the heat equation.

# 2.1 Physics-informed neural networks

Let $\Omega \subset \mathbb{R}^d$ be a domain with boundary given by $\partial \Omega$; here, $d$ is the spatial dimension. We will focus on the solution of the nonlinear Poisson equation on $\Omega$ using PINNs. Let $x \in \mathbb{R}^d$, and let $u: \mathbb{R}^d \to \mathbb{R}$ be the solution to

$$
\Delta u(x) = e^{u(x)} + f(x), \quad x \in \Omega, \tag{1}
$$

$$
(\alpha n(x) \cdot \nabla + \beta) u(x) = g(x), \quad x \in \partial \Omega, \tag{2}
$$

where $\Delta$ is the Laplacian in $\mathbb{R}^d$, $\nabla$ is the $\mathbb{R}^d$ gradient, $n(x)$ is the unit outward normal vector on the boundary $\partial \Omega$, $f(x)$ and $g(x)$ are known functions, and $\alpha, \beta \in \mathbb{R}$ are known coefficients. If the $e^{u(x)}$ term is dropped from (1), we obtain the simpler linear Poisson equation:

$$
\Delta u(x) = f(x), \quad x \in \Omega. \tag{3}
$$

The vanilla-PINN technique for solving either Poisson problem involves approximating the unknown solution $u(x)$ by a DNN $\tilde{u}(x, \mathbf{w})$ (where $\mathbf{w}$ is a vector of unknown NN weights), so that $\| \tilde{u}(x, \mathbf{w}) - u(x) \| \leq \epsilon$ for some norm $\| \cdot \|$ and some tolerance $\epsilon$. In the absence of existing solution data, this is accomplished by enforcing (1) and (2) as soft constraints on $\tilde{u}(x)$ to find the weights $\mathbf{w}$ during training. Denote by $X = \{x_k\}_{k=1}^N$ the set of training points at which these constraints are enforced; in the context of PDEs, these are also called collocation points. For convenience, we divide $X$ into two sets: $N_i$ interior points in the set $X_i$ and $N_b$ boundary points in the set $X_b$; then, $X = X_i \cup X_b$ and $N = N_i + N_b$. Further, let $\mathcal{B} = \alpha n(x) \cdot \nabla + \beta$. The vanilla-PINN training loss $e(x, \mathbf{w})$ can then be written as:

$$
e(x, \mathbf{w}) = \underbrace{\frac{1}{N_i} \sum_{j=1}^{N_i} \left(\Delta \tilde{u}(x, \mathbf{w})|_{x = x_j} - e^{\tilde{u}(x_j)} - f(x_j)\right)^2}_{\text{PDE loss in interior}} + \underbrace{\frac{1}{N_b} \sum_{i=1}^{N_b} \left(\mathcal{B} \tilde{u}(x, \mathbf{w})|_{x = x_i} - g(x_i)\right)^2}_{\text{boundary condition loss on boundary}}, \tag{4}
$$

where $\Delta$ and the $\nabla$ term in $\mathcal{B}$ are both applied through autograd. The tanh activation function is typically used, $L$-BFGS is used as the optimizer for finding the weights $\mathbf{w}$, and training is typically done in fp32 (17). For the linear Poisson equation, one simply omits the $e^{\tilde{u}}$ term from the loss above.
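
To make the role of autograd concrete, the following is a minimal PyTorch sketch (our own illustration under stated assumptions, not the authors' released code) of how the Laplacian term in (4) is typically obtained by differentiating the network output twice with respect to its input coordinates:

```python
import torch

def laplacian(u_net, x):
    # x: (N, d) collocation points. Each call builds a fresh autograd graph,
    # so this cost is paid at every single training step.
    x = x.detach().requires_grad_(True)
    u = u_net(x)                                                # (N, 1)
    du = torch.autograd.grad(u.sum(), x, create_graph=True)[0]  # (N, d) gradient
    lap = torch.zeros(x.shape[0], dtype=x.dtype, device=x.device)
    for i in range(x.shape[1]):                                 # one backward pass per dim
        lap = lap + torch.autograd.grad(du[:, i].sum(), x,
                                        create_graph=True)[0][:, i]
    return u.squeeze(1), lap
```

Each Laplacian evaluation thus costs $d + 1$ extra backward passes through the network per training step; this is precisely the repeated cost that DT-PINNs replace with a single precomputed SpMV.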

For time-dependent PDEs, the PINN becomes a function of space and time $\tilde{u}(x, t)$. We focus on the forced heat equation, given by

$$
\frac{\partial u(x, t)}{\partial t} = \Delta u(x, t) + f(x, t), \quad x \in \Omega, \tag{5}
$$

$$
\mathcal{B} u(x, t) = g(x, t), \quad x \in \partial \Omega, \tag{6}
$$

$$
u(x, 0) = u_0(x), \tag{7}
$$

where (7) is an initial condition and $u_0(x)$ is some known function. While the $\Delta$ term is handled via autograd, there are two options for handling the temporal derivative: a continuous fashion or a time-discrete fashion. In the former, one samples the full space-time interval $\Omega \times [0, T]$ with collocation/training points, and then uses autograd to compute all spatial and temporal derivatives. The loss terms are also augmented with the initial condition (7), which is enforced on the full space-time solution. In the time-discrete approach, one typically discretizes the time derivative using an appropriate scheme (such as a Runge-Kutta method), and then proceeds in a step-by-step fashion. We focus on the continuous approach in this work.

# 2.2 Radial basis function-finite differences (RBF-FD)

We now briefly review RBF-FD methods. Given some function $f: \mathbb{R}^d \to \mathbb{R}$, the goal of any FD formula is to approximate the action of a linear operator $\mathcal{L}$ on that function (i.e., to approximate $\mathcal{L}f$) at some location $x_1$. This is typically accomplished by using a weighted linear combination of $f$ at $x_1$ and its $n - 1$ nearest neighbors. Mathematically, this can be written as:

$$
\left. \mathcal{L} f(x) \right|_{x = x_1} \approx \sum_{k=1}^{n} c_k f(x_k), \tag{8}
$$

where the real numbers $c_k$ are called FD weights, and the set of points $x_1, \ldots, x_n$ is called an FD stencil. In general, given a set of samples $X = \{x_j\}_{j=1}^{N}$, one can repeat the above procedure to find FD weights at every single point. These weights can be assembled into an $N \times N$ differentiation matrix $L$ so that $\mathcal{L} f(x)|_X \approx L f(x)|_X$. If $n \ll N$, $L$ will be a sparse matrix with at most $n$ non-zero elements per row. If $X$ lies on a Cartesian grid, the entries of $L$ (i.e., the FD weights $c_k$) are known in advance. However, if $X$ is a more general point cloud, standard FD cannot be used to generate the entries of $L$ (see the Mairhuber-Curtis theorem (7)). The RBF-FD method instead uses an interpolatory combination of RBFs and polynomials. Without loss of generality, we describe the RBF-FD procedure for $x_1$ and its $n - 1$ nearest neighbors. Let $\phi(r) = r^m$, where $m$ is odd, be a radial kernel (a polyharmonic spline), and let $q_j(x), j = 1, \dots, \binom{\ell + d}{d}$, be a basis for polynomials of total degree $\ell$ in $d$ dimensions; we use tensor-product Legendre polynomials. The RBF-FD weights for the operator $\mathcal{L}$ at the point $x_1$ are computed by solving the following dense (block) linear system on this stencil:

$$
\begin{bmatrix} A & P \\ P^T & 0 \end{bmatrix} \begin{bmatrix} \mathbf{c} \\ \lambda \end{bmatrix} = \begin{bmatrix} \mathcal{L} a \\ \mathcal{L} q \end{bmatrix}, \tag{9}
$$

where

$$
A_{ij} = \phi\left(\left\| x_i - x_j \right\|\right), \; i, j = 1, \dots, n, \quad P_{ij} = q_j(x_i), \; i = 1, \dots, n, \; j = 1, \dots, \binom{\ell + d}{d}, \tag{10}
$$

$$
\mathcal{L} a = \left. \mathcal{L} \phi(\| x - x_j \|) \right|_{x = x_1}, \quad \mathcal{L} q = \left. \mathcal{L} q_j(x) \right|_{x = x_1}, \; j = 1, \dots, \binom{\ell + d}{d}, \tag{11}
$$

where $\mathbf{c}$ is the (column) vector of $n$ RBF-FD weights. The vector $\lambda$ is a set of Lagrange multipliers enforcing the condition $P^T \mathbf{c} = \mathcal{L} q$, thereby ensuring that (a) the RBF-FD weights $\mathbf{c}$ can exactly differentiate all polynomials up to total degree $\ell$; and that (b) the error in the RBF-FD approximation to $\mathcal{L}$ when applied to all other functions is $O(h^{\ell + 1 - \theta})$, where $0 \leq h \leq 1$ is a measure of sample spacing in the stencil, and $\theta$ is the number of derivatives in the differential operator $\mathcal{L}$ (6). We set $\ell = p + \theta - 1$ based on the desired order of convergence $p$ so that the error is $O(h^p)$. We then set the stencil size to $n = 2\binom{\ell + d}{d} + 1$ as this ensures that (9) has a solution (2), and also set $m = \ell$ if $\ell$ is odd, and $m = \ell - 1$ if $\ell$ is even (29). $L$ becomes more dense for higher values of $p$ and dimension $d$, as $n = O(p^d)$. When this procedure is repeated for each point in the set $X$, the cost scales as $O(N)$ for fixed $n$, with large speedups possible by computing multiple sets of weights using each stencil (28; 29; 31; 34; 32). For domains with fixed boundaries, the RBF-FD weights can be precomputed and reused during simulation. However, domains with moving boundaries require recomputation of RBF-FD weights proximal to the boundary at every time-step; fortunately, this can be done quite efficiently (32).

Ghost points When tackling boundary conditions involving derivatives (such as in (2)) using RBF-FD, it is common to include a set of $N_b$ ghost points outside the domain boundary $\partial \Omega$ in the set of samples to ensure that RBF-FD stencils at the boundary are less one-sided; this aids numerical stability and accuracy. Ghost points also allow us to enforce the PDE at both the interior and boundary points. We therefore define and use the extended set $\tilde{X} = X_i \cup X_b \cup X_g$, where $X_g$ is the set of ghost points. For the remainder of this article, let the RBF-FD differentiation matrix for $\Delta$ be $L$ (dimensions $(N_i + N_b) \times (N_i + 2N_b)$), and that for $\mathcal{B}$ be $B$ (dimensions $N_b \times (N_i + 2N_b)$).

# 3 Discretely-Trained PINNs (DT-PINNs)

Having described both vanilla-PINNs and RBF-FD, we are now ready to describe DT-PINNs. In short, DT-PINNs are PINNs that are trained using the sparse differentiation matrices $L$ and $B$ in place of the autograd operations used to compute the Laplacian and boundary operators in the loss function (4) (and its heat equation equivalent). All operations are carried out in fp64.

Poisson Equation Focusing first on the nonlinear Poisson equation (1), recall that $\tilde{u}(x, \mathbf{w})$ is the PINN approximation to the true solution $u(x)$. Let the evaluation of $\tilde{u}(x, \mathbf{w})$ on the set $\tilde{X}$ be $\tilde{\mathbf{u}}$, i.e., $\tilde{\mathbf{u}}$ is obtained by evaluating $\tilde{u}(x, \mathbf{w})$ at interior, boundary, and ghost points. Further define the vector $\mathbf{e}$, which is the loss function evaluated at only the interior and boundary points, i.e., $\mathbf{e} = e(x, \mathbf{w})|_X$. Then, the DT-PINN loss function can be written as:

$$
\mathbf{e} = \underbrace{\frac{1}{N_i + N_b} \| L \tilde{\boldsymbol{u}} - \exp(\tilde{\boldsymbol{u}}) - \mathbf{f} \|_2^2}_{\text{PDE loss in interior and on boundary}} + \underbrace{\frac{1}{N_b} \| B \tilde{\boldsymbol{u}} - \mathbf{g} \|_2^2}_{\text{boundary condition loss on boundary}}, \tag{12}
$$

where $L$ and $B$ were defined previously, $\exp(\tilde{\mathbf{u}})$ is the element-wise exponential of the vector $\tilde{\mathbf{u}}$, $\mathbf{f} = f(x)|_X$, and $\mathbf{g} = g(x)|_{X_b}$; here, $\mathbf{f}$ has dimension $(N_i + N_b) \times 1$, and $\mathbf{g}$ has dimension $N_b \times 1$. For efficiency, $L$ and $B$ can be precomputed using RBF-FD before the training process begins, and then simply multiplied with the vector $\tilde{\mathbf{u}}$ to obtain its numerical derivatives. The loss function (12) is then minimized over $\mathbf{w}$ as usual using autograd in conjunction with a suitable optimizer. For the linear Poisson equation (3), we simply drop the $\exp(\tilde{\mathbf{u}})$ term.
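
A minimal PyTorch sketch of the loss (12) follows, assuming (as one possible convention, chosen by us for illustration) that the rows of $\tilde{X}$ are ordered as interior, then boundary, then ghost points, and that `L_sp` and `B_sp` are the precomputed RBF-FD matrices stored as fp64 sparse tensors; all names are ours:

```python
import torch

def dt_pinn_loss(u_net, X_ext, L_sp, B_sp, f, g, Ni, Nb):
    """DT-PINN loss (12): SpMV with precomputed L, B replaces autograd."""
    u = u_net(X_ext).squeeze(1)                              # all Ni + 2*Nb points
    Lu = torch.sparse.mm(L_sp, u.unsqueeze(1)).squeeze(1)    # length Ni + Nb
    Bu = torch.sparse.mm(B_sp, u.unsqueeze(1)).squeeze(1)    # length Nb
    pde = ((Lu - torch.exp(u[: Ni + Nb]) - f) ** 2).mean()   # exp at non-ghost points
    bc = ((Bu - g) ** 2).mean()
    return pde + bc
```

Gradients with respect to the network weights still flow through the SpMV (it is a linear operation), so a standard optimizer such as $L$-BFGS can be used unchanged.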

Heat Equation When using DT-PINNs for the heat equation, we demonstrate the flexibility of our method by using a mixed training technique where the time derivative is handled with autograd and the spatial derivatives are discretized with RBF-FD; this also allows us to bypass the Courant-Friedrichs-Lewy (CFL) constraint on the time-step. We carefully order the evaluations of the network so that $L$ and $B$ multiply the right quantities. Let $\tilde{u}(x, t, \mathbf{w})$ be the PINN, and recall that we have $N_t$ time steps over the interval $[0, T]$; in addition, we also have the initial condition at time $t = 0$, making for a total of $N_t + 1$ steps. Define $\tilde{\boldsymbol{u}}_k = \tilde{u}|_{x = \tilde{X}, t = k \triangle t}$, where $\triangle t$ is the time-step. This vector is the evaluation of $\tilde{u}$ at all spatial locations (including ghost points) for the $k$-th time slice. This definition in turn allows us to define two vectors, $\tilde{\boldsymbol{u}}_{\Delta}$ and $\tilde{\boldsymbol{u}}_{\mathcal{B}}$, as follows:

$$
\tilde{\boldsymbol{u}}_{\Delta} = \begin{bmatrix} L \tilde{\boldsymbol{u}}_0 \\ L \tilde{\boldsymbol{u}}_1 \\ \vdots \\ L \tilde{\boldsymbol{u}}_{N_t} \end{bmatrix}, \quad \tilde{\boldsymbol{u}}_{\mathcal{B}} = \begin{bmatrix} B \tilde{\boldsymbol{u}}_0 \\ B \tilde{\boldsymbol{u}}_1 \\ \vdots \\ B \tilde{\boldsymbol{u}}_{N_t} \end{bmatrix}. \tag{13}
$$

The vector $\tilde{\boldsymbol{u}}_{\Delta}$ has dimensions $(N_t + 1)(N_i + N_b) \times 1$, and $\tilde{\boldsymbol{u}}_{\mathcal{B}}$ has dimensions $(N_t + 1) N_b \times 1$. Next, we define the data vectors $\mathbf{f}$ and $\mathbf{g}$ as follows:

$$
\mathbf{f} = \begin{bmatrix} \mathbf{f}_0 \\ \mathbf{f}_1 \\ \vdots \\ \mathbf{f}_{N_t} \end{bmatrix}, \quad \mathbf{g} = \begin{bmatrix} \mathbf{g}_0 \\ \mathbf{g}_1 \\ \vdots \\ \mathbf{g}_{N_t} \end{bmatrix}, \tag{14}
$$

where $\mathbf{f}_k = f(x, t)|_{x = X, t = k \triangle t}$ and $\mathbf{g}_k = g(x, t)|_{x = X_b, t = k \triangle t}$. Finally, we define two more vectors: $\boldsymbol{u}_0 = u_0(x)|_X$, the vector evaluating the initial condition on the set $X$ (interior and boundary points); and $\tilde{\boldsymbol{u}}_t$, the vector of evaluations of $\frac{\partial \tilde{u}}{\partial t}$ at the spatial locations (interior and boundary) for each time slice:

$$
\tilde{\boldsymbol{u}}_t = \begin{bmatrix} \left(\frac{\partial \tilde{u}}{\partial t}\right)_0 \\ \left(\frac{\partial \tilde{u}}{\partial t}\right)_1 \\ \vdots \\ \left(\frac{\partial \tilde{u}}{\partial t}\right)_{N_t} \end{bmatrix}, \tag{15}
$$

where $\left(\frac{\partial \tilde{u}}{\partial t}\right)_k = \left. \frac{\partial \tilde{u}}{\partial t} \right|_{x = X, t = k \triangle t}$. This vector is computed using autograd. With these vectors defined, we can finally write the DT-PINN loss vector $\mathbf{e}$ for the heat equation as

$$
\mathbf{e} = \underbrace{\frac{1}{N_i + N_b} \| \boldsymbol{u}_0 - \tilde{u}|_{x = X, t = 0} \|_2^2}_{\text{initial condition}} + \underbrace{\frac{1}{(N_t + 1)(N_i + N_b)} \| \tilde{\boldsymbol{u}}_t - \tilde{\boldsymbol{u}}_{\Delta} - \mathbf{f} \|_2^2}_{\text{PDE loss in interior and on boundary}} + \underbrace{\frac{1}{(N_t + 1) N_b} \| \tilde{\boldsymbol{u}}_{\mathcal{B}} - \mathbf{g} \|_2^2}_{\text{boundary condition loss on boundary}}. \tag{16}
$$
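
The mixed treatment in (16) can be sketched as follows, again as our own PyTorch illustration with the same interior/boundary/ghost ordering assumed above: autograd differentiates the network with respect to its $t$ input, while the spatial operators are the same precomputed sparse matrices applied slice by slice.

```python
import torch

def heat_dt_pinn_loss(u_net, X_ext, dt, Nt, L_sp, B_sp, f, g, u0, Ni, Nb):
    """DT-PINN heat-equation loss (16): autograd in time, SpMV in space.

    f, g: sequences of per-slice data vectors f_k (length Ni + Nb) and
    g_k (length Nb) from (14); u0: initial condition on interior + boundary.
    """
    pde, bc = 0.0, 0.0
    for k in range(Nt + 1):                   # loop over the Nt + 1 time slices
        t = torch.full((X_ext.shape[0], 1), k * dt,
                       dtype=X_ext.dtype, requires_grad=True)
        u_k = u_net(torch.cat([X_ext, t], dim=1)).squeeze(1)
        u_t = torch.autograd.grad(u_k.sum(), t, create_graph=True)[0].squeeze(1)
        Lu = torch.sparse.mm(L_sp, u_k.unsqueeze(1)).squeeze(1)
        Bu = torch.sparse.mm(B_sp, u_k.unsqueeze(1)).squeeze(1)
        pde = pde + ((u_t[: Ni + Nb] - Lu - f[k]) ** 2).sum()
        bc = bc + ((Bu - g[k]) ** 2).sum()
        if k == 0:                            # initial-condition term at t = 0
            init = ((u0 - u_k[: Ni + Nb]) ** 2).mean()
    return init + pde / ((Nt + 1) * (Ni + Nb)) + bc / ((Nt + 1) * Nb)
```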

![](images/0192e00dd00e02b27b00bdf70eafa78e41d75af61d05c4f8769cbde198c30073.jpg)
(a)

![](images/2426a46951851e9b0b20af20fb6a8c4482f6b76f845419f0ed607d54f6b0542d.jpg)
(b)

Figure 1: Quasi-uniform collocation points and the manufactured solution on the unit disk. The figure shows (a) $N = 1663$ interior and boundary collocation points on the unit disk and (b) the manufactured solution given by (19).

# 4 Results

We now present experimental results comparing DT-PINN and vanilla-PINN performance on the linear Poisson equation (3), the nonlinear Poisson equation (1), and the forced heat equation (5).

Setup All experiments were run for 5000 epochs on an NVIDIA GeForce RTX 2070. All results are reproducible with the seeds we used in the experiments. We used the $L$-BFGS optimizer with manually fine-tuned learning rates for both vanilla-PINNs and DT-PINNs. Both DT-PINNs and vanilla-PINNs used a constant NN depth of $s = 4$ layers with 50 nodes each across all runs. We use quasi-uniform collocation points generated using a node generator (30). Figure 1b shows the manufactured solution as specified in (19). For the Poisson experiments, we report errors on a test set of $N_{test} = 21748$ points. For the heat equation, we report results directly at the collocation points for convenience. For all experiments, the spatial domain $\Omega$ is set to the unit disk

$$
\Omega = \{ x \in \mathbb{R}^d \mid \| x \|_2^2 \leq 1 \}. \tag{17}
$$

For illustration, we show one of the point sets on the unit disk in Figure 1a (with $N = 1663$ points). In the 2D heat equation experiment, the space-time domain is chosen to be $\Omega \times [0, 1]$. The time interval $[0, 1]$ is evenly divided into 24 time steps so that $N_t = 24$ (excluding $t = 0$), and the time-step was set to $\triangle t = \frac{1}{24}$. We measure all errors against a manufactured (specified) solution $u$, and specify $f$ so that $u$ satisfies the PDE exactly. The boundary condition term $g$ is computed by applying the operator $\mathcal{B}$ to $u$; we use $\alpha = \beta = 1$ for all tests. To compare DT-PINNs and vanilla-PINNs to the manufactured solution $u$, we report the relative $\ell_2$ error

$$
e_{\ell_2} = \frac{\| \tilde{\boldsymbol{u}} - \boldsymbol{u} \|_2}{\| \boldsymbol{u} \|_2}, \tag{18}
$$

where $\boldsymbol{u}$ is the true solution vector, and $\tilde{\boldsymbol{u}}$ is either the DT-PINN or vanilla-PINN solution vector.

# 4.1 Effect of neural network depth

We first study the effect of PINN depth $s$ (fixing the number of nodes per layer) on computing the Laplacian $\Delta$ of the output with respect to the spatial variable $x$ using either autograd or RBF-FD. We compute errors against fp64 autograd for fp32 autograd and for RBF-FD with $p = 2, 3, 4$, and 5. All errors were computed on $N = 19638$ quasi-uniform collocation points. The results are shown in Figure 2a. We see that $p = 4$ and $p = 5$ are more accurate than fp32 autograd, and that increasing $p$ increases the accuracy of RBF-FD by about two orders of magnitude. The errors are reasonably low for $p = 3$ as well. In Figure 2b, we report the time taken for the same test. It is immediately clear that fp64 autograd is significantly more expensive than the fp32 variant, though both costs scale slowly with the network depth $s$. More importantly, the time taken for fp64 RBF-FD (for all orders) is lower than that of both fp32 and fp64 autograd, and is independent of the network depth $s$, primarily because the RBF-FD weights can be precomputed and repeatedly reused during training.

![](images/bbc9f46cd39f32a98be1db4ce409c17302c67daf27259dba0a6e2ed4b0a32b4f.jpg)
(a)

![](images/a425fa1c26e550f69a53acd90e916e0eec61b2dbc435e9d74e7a7ee24a02ba57.jpg)
(b)

Figure 2: Autograd properties as a function of network depth $s$. The figure shows (a) the effect of neural network depth $s$ on the relative error (with respect to fp64 autograd) and (b) the time taken for one application of autograd in fp32 and fp64, compared to the time taken for the SpMV using RBF-FD. The RBF-FD weights for $N = 19638$ collocation points were precomputed using an efficient CPU code in approximately 0.1s. Error bars over 15 random runs are shown.

# 4.2 Linear Poisson equation

![](images/e68fd375fa00135653b4e85822dfebfaf04c8a1f7f4d1bbb264d42f4801654d1.jpg)
(a)

![](images/9042bde1682de7130be324a9b6fdb4ba1a3976a97976e6a975b9eac58a8d2e4a.jpg)
(b)

![](images/8e8584bac6bd92708ba6ec4242a6a64019bcae727a04b55d6b3393c6a46ecbba.jpg)
(c)

Figure 3: fp64 DT-PINNs and fp32 vanilla-PINN results on the linear Poisson equation (3) for different numbers of collocation points $(N)$ and orders of accuracy $(p)$. We show (a) the relative error in the PINN solution; (b) the time taken to converge to the lowest relative error; and (c) the speedup attained by fp64 DT-PINNs relative to fp32 vanilla-PINN for those times. Error bars over 5 random runs are shown.

Next, we study the performance of fp64 DT-PINNs and fp32 vanilla-PINNs on the linear Poisson equation (3) on the domain (17). Letting $x = [x_1, x_2]$, we specify the true solution $u$ to be

$$
u(x) = u(x_1, x_2) = 1 + \sin(\pi x_1) \cos(\pi x_2), \tag{19}
$$

and enforce this by setting $f = \Delta u$; the true solution $u$ is shown in Figure 1b. We then solve for $\tilde{u}$ as described in Section 3. The results of this experiment are shown in Figure 3. We present relative errors (Figure 3a), wall clock time (Figure 3b), and speedup (Figure 3c). We also present results for fp64 vanilla-PINNs. It is important to note that fp64 DT-PINNs were completely stored and trained in fp64, a format widely known to be significantly slower on the GPU than fp32.
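
The forcing term for this manufactured solution can be generated symbolically; the short SymPy check below (our own, not part of the paper's pipeline) produces the closed form of $f$:

```python
import sympy as sp

x1, x2 = sp.symbols("x1 x2")
u = 1 + sp.sin(sp.pi * x1) * sp.cos(sp.pi * x2)          # manufactured solution (19)
f = sp.simplify(sp.diff(u, x1, 2) + sp.diff(u, x2, 2))   # f = Laplacian(u)
print(f)                                                 # -2*pi**2*sin(pi*x1)*cos(pi*x2)
```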

Figure 3a shows the relative errors for DT-PINNs as a function of the number of collocation points $N$. DT-PINNs for $p = 3, 4, 5$ produce relative errors similar to both fp32 and fp64 vanilla-PINNs for the same value of $N$. In contrast, the DT-PINN using $p = 2$ is generally less accurate, showing that higher-order accuracy is needed to reach the same relative errors as vanilla-PINNs. Examining Figures 3b and 3c, we also see that all fp64 DT-PINNs can be trained much more rapidly than both fp32 and fp64 vanilla-PINNs. In fact, Figure 3c shows a maximum training speedup of 4x for DT-PINNs even if $p = 2$ is ignored. In general, fp64 DT-PINNs for $p > 2$ are trained much more quickly than vanilla-PINNs without a significant loss in accuracy. We also note that using fp32 DT-PINNs did not lead to greater speedups over the fp64 DT-PINNs, and came with a loss in accuracy. These results are shown in Appendix A.1.2.

![](images/c4fe2ae1a64efc1e67dd3006bce0db24b5e89fba0f83c30dcbacdb13574b3cc6.jpg)
(a)

![](images/5b93951bf5e7a04e27cdd37a6bca74df3dbe4ee0fb90cc0fc7378b5dab4a7add.jpg)
(b)

Figure 4: Number of training epochs to achieve the lowest relative error as a function of the number of collocation points $N$ and order $p$ for (a) fp32 DT-PINNs and (b) fp64 DT-PINNs. Error bars over 5 random runs are shown.

The superior performance of fp64 DT-PINNs becomes clearer when we examine the number of training epochs as a function of the number of collocation points $N$ (Figure 4). Figures 4a and 4b both illustrate that fp32 and fp64 DT-PINNs reach their lowest relative errors in fewer epochs than vanilla-PINNs. These results provide evidence that DT-PINNs have simpler loss function landscapes than their vanilla-PINN counterparts, also implying that loss functions involving linear combinations of NN values are easier to minimize than loss functions involving derivatives of NNs. Figure 4b also shows that only fp64 DT-PINNs take fewer epochs to train as $N$ is increased. We also see that moving to fp64 does not appear to significantly speed up vanilla-PINNs. It is therefore the combination of discrete training and fp64 that results in speedups for increasing $N$.<sup>1</sup>

# 4.3 Nonlinear Poisson equation

Next, to understand the influence of nonlinearities in terms not involving the differential operator, we test the performance of DT-PINNs on the nonlinear Poisson equation (1). To measure errors, we use the manufactured solution given by (19), and set $f = \Delta u - e^u$. The results are shown in Figure 5; for simplicity, we omit $p = 2$ and fp64 vanilla-PINNs, as both have poor cost-accuracy tradeoffs. First, Figure 5a shows that despite some outliers, fp64 DT-PINNs achieve relative errors comparable to fp32 vanilla-PINNs. Further, Figure 5b shows that DT-PINNs are still trained faster than vanilla-PINNs. However, comparing Figure 5c to Figure 3c (linear Poisson equation), we see that the average speedup is higher for the linear Poisson equation. This shows that DT-PINNs may not offer speedups over vanilla-PINNs if terms not involving differential operators dominate training times.

![](images/ca5d30fa770cba8bae64d312cb9b26e4b69b7f6cd78b54aef93615dd98458bf2.jpg)
(a)

![](images/f47f2d88be6ee5c5f39f0c1ba27a35636edecaad73191974d80bbf4eef43f7a8.jpg)
(b)

![](images/c40adde483121908350ebd54be35b20f33d23f9341dabbb3af461bbdd2863a31.jpg)
(c)

Figure 5: fp64 DT-PINNs and fp32 vanilla-PINN results on the nonlinear Poisson equation (1) for different numbers of collocation points $(N)$ and orders of accuracy $(p)$. We show (a) the relative error in the PINN solution; (b) the time taken to converge to the lowest relative error; and (c) the speedup attained by fp64 DT-PINNs relative to fp32 vanilla-PINN for those times. Error bars over 5 random runs are shown.

# 4.4 Heat equation

![](images/7a4df37dcf58adb1b1bd9b958a8d07e3983d96dd4f2437ff402025acbb258a29.jpg)
(a)

![](images/2e3459657e4e0ead6e07e307e83be109d50c8a175ed35afdedb9e1b3f385aee6.jpg)
(b)

Figure 6: fp64 DT-PINN and fp32 vanilla-PINN results on the heat equation on $N = 828$ spatial points and $N_t = 24$ time-steps. The figure shows (a) the relative error for fp64 DT-PINNs as a function of the approximation order $p$; and (b) the speedup attained by fp64 DT-PINNs over fp32 vanilla-PINNs as a function of $p$. Error bars over 5 random runs are shown.

Next, we compare fp64 DT-PINNs and fp32 vanilla-PINNs on the 2D heat equation. In order to demonstrate the flexibility of our method, we adopt a mixed training approach where only the spatial derivatives are discretized with RBF-FD. We specify the true solution $u$ to be

$$
u(x, t) = u(x_1, x_2, t) = 1 + \sin(\pi x_1) \cos(\pi x_2) \sin(\pi t), \tag{20}
$$

and specify $f = \frac{\partial u}{\partial t} - \Delta u$ so that $u$ satisfies the heat equation for all space-time. The initial condition is $u_0(x) = u(x_1, x_2, 0) = 1$. We trained on $N = 828$ spatial collocation points over 25 time slices (including time $t = 0$) for a total of 20,700 space-time collocation points; we express all results as a function of $p$. These results are shown in Figure 6.

First, Figure 6a shows results similar to the 2D Poisson equation, with $p > 2$ achieving relative errors similar to fp32 vanilla-PINNs. Figure 6b shows that we achieve 2-4x speedups over vanilla-PINNs. We observed in our experiments that the speedup appears to increase as a function of the number of time-steps $N_t$ (results not shown). It is likely that one could achieve further speedups by also discretizing the temporal derivatives, but we leave this exploration for future work.

# 5 Summary and future work

We presented a novel technique, DT-PINNs, that involves training PINNs by using RBF-FD for spatial derivatives, and using fp64 weights and training instead of fp32. This involved replacing all autograd operations (dense matrix-matrix multiplies) related to PDE loss terms with an SpMV operation. We showed that using an RBF-FD approximation order of $p > 2$ resulted in DT-PINNs that were comparable in accuracy to vanilla-PINNs while offering 2-4x speedups in training times for both the linear and nonlinear Poisson equations. We also showed that DT-PINNs trained in a mixed fashion (autograd for time, RBF-FD for space) achieved comparable accuracy and speedup on the heat equation. DT-PINNs therefore constitute a new paradigm for scientific machine learning that allows practitioners to leverage existing sophisticated scientific computing techniques to accelerate ML training times.

There are several possible extensions to our current work. It is likely that using DT-PINNs in conjunction with X-PINNs and G-PINNs will yield even greater speedups in training times. Further, DT-PINNs open the door to leveraging compute more efficiently. For instance, the SpMV operations could be parallelized using distributed memory systems in conjunction with GPUs, thereby allowing scaling to very large training sets; alternatively, the SpMV operation could be parallelized on many-core CPUs while other operations are conducted on the GPU. It may also be profitable to explore mixed-precision training of DT-PINNs. Finally, DT-PINNs can be viewed as vanilla-PINNs with partially linearized constraints; it may be profitable to explore other types of constraint linearization to accelerate training and simplify loss function landscapes.

# Acknowledgments and Disclosure of Funding

VS was supported by National Science Foundation (NSF) grant CCF 1714844.
|
| 261 |
+
|
| 262 |
+
# References
|
| 263 |
+
|
| 264 |
+
[1] Barnett, G. A. (2015). A Robust RBF-FD Formulation based on Polyharmonic Splines and Polynomials. PhD thesis, University of Colorado Boulder.
|
| 265 |
+
[2] Bayona, V., Flyer, N., Fornberg, B., and Barnett, G. A. (2017). On the role of polynomials in RBF-FD approximations: II. Numerical solution of elliptic PDEs. J. Comput. Phys., 332:257-273.
|
| 266 |
+
[3] Bayona, V., Moscoso, M., Carretero, M., and Kindelan, M. (2010). RBF-FD formulas and convergence properties. J. Comput. Phys., 229(22):8281-8295.
|
| 267 |
+
[4] Bettencourt, J., Johnson, M. J., and Duvenaud, D. (2019). Taylor-mode automatic differentiation for higher-order derivatives in jax.
|
| 268 |
+
[5] Davydov, O. and Oanh, D. T. (2011). Adaptive meshless centres and RBF stencils for Poisson equation. J. Comput. Phys., 230(2):287-304.
|
| 269 |
+
[6] Davydov, O. and Schaback, R. (2018). Minimal numerical differentiation formulas. Numerische Mathematik, 140(3):555-592.
|
| 270 |
+
[7] Fasshauer, G. E. (2007). Meshfree Approximation Methods with MATLAB. Interdisciplinary Mathematical Sciences - Vol. 6. World Scientific Publishers, Singapore.
|
| 271 |
+
[8] Flyer, N., Barnett, G. A., and Wicker, L. J. (2016a). Enhancing finite differences with radial basis functions: Experiments on the Navier-Stokes equations. J. Comput. Phys., 316:39-62.
|
| 272 |
+
[9] Flyer, N., Fornberg, B., Bayona, V., and Barnett, G. A. (2016b). On the role of polynomials in RBF-FD approximations: I. Interpolation and accuracy. J. Comput. Phys., 321:21-38.
|
| 273 |
+
|
| 274 |
+
[10] Flyer, N., Lehto, E., Blaise, S., Wright, G. B., and St-Cyr, A. (2012). A guide to RBF-generated finite differences for nonlinear transport: shallow water simulations on a sphere. J. Comput. Phys., 231:4078-4095.
|
| 275 |
+
[11] Flyer, N. and Wright, G. B. (2007). Transport schemes on a sphere using radial basis functions. J. Comput. Phys., 226:1059-1084.
|
| 276 |
+
[12] Flyer, N. and Wright, G. B. (2009). A radial basis function method for the shallow water equations on a sphere. Proc. Roy. Soc. A, 465:1949-1976.
|
| 277 |
+
[13] Fornberg, B. and Lehto, E. (2011). Stabilization of RBF-generated finite difference methods for convective PDEs. J. Comput. Phys., 230:2270-2285.
|
| 278 |
+
[14] Fuselier, E. J. and Wright, G. B. (2013). A high-order kernel method for diffusion and reaction-diffusion equations on surfaces. J. Sci. Comput., 56(3):535-565.
|
| 279 |
+
[15] Han, J., Jentzen, A., and E, W. (2018). Solving high-dimensional partial differential equations using deep learning. Proceedings of the National Academy of Sciences, 115(34):8505-8510.
|
| 280 |
+
[16] Jagtap, A. D. and Karniadakis, G. E. (2020). Extended physics-informed neural networks (XPINNs): A generalized space-time domain decomposition based deep learning framework for nonlinear partial differential equations. Communications in Computational Physics, 28(5):2002-2041.
|
| 281 |
+
[17] Krishnapriyan, A., Gholami, A., Zhe, S., Kirby, R., and Mahoney, M. W. (2021). Characterizing possible failure modes in physics-informed neural networks. In Beygelzimer, A., Dauphin, Y., Liang, P., and Vaughan, J. W., editors, Advances in Neural Information Processing Systems.
|
| 282 |
+
[18] Lehto, E., Shankar, V., and Wright, G. B. (2017). A radial basis function (RBF) compact finite difference (FD) scheme for reaction-diffusion equations on surfaces. SIAM J. Sci. Comput., 39:A2129-A2151.
|
| 283 |
+
[19] LeVeque, R. J. (2007). Finite difference methods for ordinary and partial differential equations: steady-state and time-dependent problems. SIAM.
|
| 284 |
+
[20] Long, Z., Lu, Y., Ma, X., and Dong, B. (2017). PDE-Net: Learning PDEs from data.
|
| 285 |
+
[21] Okuta, R., Unno, Y., Nishino, D., Hido, S., and Loomis, C. (2017). CuPy: A NumPy-compatible library for NVIDIA GPU calculations. In Proceedings of Workshop on Machine Learning Systems (LearningSys) in The Thirty-first Annual Conference on Neural Information Processing Systems (NIPS).
|
| 286 |
+
[22] Pang, G., Lu, L., and Karniadakis, G. E. (2019). fPINNs: Fractional physics-informed neural networks. SIAM Journal on Scientific Computing, 41(4):A2603-A2626.
|
| 287 |
+
[23] Pang, T., Xu, K., Li, C., Song, Y., Ermon, S., and Zhu, J. (2020). Efficient learning of generative models via finite-difference score matching. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H., editors, Advances in Neural Information Processing Systems, volume 33, pages 19175-19188. Curran Associates, Inc.
|
| 288 |
+
[24] Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al. (2019). PyTorch: An imperative style, high-performance deep learning library. Advances in Neural Information Processing Systems, 32.
|
| 289 |
+
[25] Piret, C. (2012). The orthogonal gradients method: A radial basis functions method for solving partial differential equations on arbitrary surfaces. J. Comput. Phys., 231(20):4662-4675.
|
| 290 |
+
[26] Piret, C. and Dunn, J. (2016). Fast RBF OGr for solving PDEs on arbitrary surfaces. AIP Conference Proceedings, 1776(1).
|
| 291 |
+
[27] Raissi, M., Perdikaris, P., and Karniadakis, G. (2019). Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational Physics, 378:686-707.
|
| 292 |
+
|
| 293 |
+
[28] Shankar, V. (2017). The overlapped radial basis function-finite difference (RBF-FD) method: A generalization of RBF-FD. J. Comput. Phys., 342:211-228.
|
| 294 |
+
[29] Shankar, V. and Fogelson, A. L. (2018). Hyperviscosity-based stabilization for radial basis function-finite difference (RBF-FD) discretizations of advection-diffusion equations. J. Comput. Phys., 372:616-639.
|
| 295 |
+
[30] Shankar, V., Kirby, R., and Fogelson, A. (2018a). Robust node generation for mesh-free discretizations on irregular domains and surfaces. SIAM Journal on Scientific Computing, 40(4):A2584-A2608.
|
| 296 |
+
[31] Shankar, V., Narayan, A., and Kirby, R. M. (2018b). RBF-LOI: Augmenting radial basis functions (RBFs) with least orthogonal interpolation (LOI) for solving PDEs on surfaces. Journal of Computational Physics, 373:722-735.
|
| 297 |
+
[32] Shankar, V., Wright, G. B., and Fogelson, A. L. (2021). An efficient high-order meshless method for advection-diffusion equations on time-varying irregular domains. Journal of Computational Physics, 445:110633.
|
| 298 |
+
[33] Shankar, V., Wright, G. B., Kirby, R. M., and Fogelson, A. L. (2014). A radial basis function (RBF)-finite difference (FD) method for diffusion and reaction-diffusion equations on surfaces. J. Sci. Comput., 63(3):745-768.
|
| 299 |
+
[34] Shankar, V., Wright, G. B., and Narayan, A. (2020). A robust hyperviscosity formulation for stable RBF-FD discretizations of Advection-Diffusion-Reaction equations on manifolds. SIAM Journal on Scientific Computing, 42(4):A2371-A2401.
|
| 300 |
+
[35] Shi, Z., Gulgec, N. S., Berahas, A. S., Pakzad, S. N., and Takáč, M. (2020). Finite difference neural networks: Fast prediction of partial differential equations. In 2020 19th IEEE International Conference on Machine Learning and Applications (ICMLA), pages 130-135. IEEE.
|
| 301 |
+
[36] Strang, G., Fix, G. J., and Griffin, D. (1974). An analysis of the finite-element method.
|
| 302 |
+
[37] Wright, G. B. and Fornberg, B. (2006). Scattered node compact finite difference-type formulas generated from radial basis functions. J. Comput. Phys., 212(1):99-123.
|
| 303 |
+
[38] Yu, J., Lu, L., Meng, X., and Karniadakis, G. E. (2022). Gradient-enhanced physics-informed neural networks for forward and inverse PDE problems. Computer Methods in Applied Mechanics and Engineering, 393:114823.
|
| 304 |
+
|
| 305 |
+
# Checklist
|
| 306 |
+
|
| 307 |
+
1. For all authors...
|
| 308 |
+
|
| 309 |
+
(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
|
| 310 |
+
(b) Did you describe the limitations of your work? [Yes]
|
| 311 |
+
(c) Did you discuss any potential negative societal impacts of your work? [No] To the best of the authors' knowledge, our work has no negative societal impacts, including potential malicious or unintended uses, environmental impact, or security or privacy concerns.
|
| 312 |
+
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
|
| 313 |
+
|
| 314 |
+
2. If you are including theoretical results...
|
| 315 |
+
|
| 316 |
+
(a) Did you state the full set of assumptions of all theoretical results? [N/A]
|
| 317 |
+
(b) Did you include complete proofs of all theoretical results? [N/A]
|
| 318 |
+
|
| 319 |
+
3. If you ran experiments...
|
| 320 |
+
|
| 321 |
+
(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
|
| 322 |
+
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
|
| 323 |
+
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes]
|
| 324 |
+
|
| 325 |
+
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes]
|
| 326 |
+
|
| 327 |
+
4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
|
| 328 |
+
|
| 329 |
+
(a) If your work uses existing assets, did you cite the creators? [N/A]
|
| 330 |
+
(b) Did you mention the license of the assets? [N/A]
|
| 331 |
+
(c) Did you include any new assets either in the supplemental material or as a URL? [Yes] We release the datasets and codebase we used as part of the supplementary material.
|
| 332 |
+
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
|
| 333 |
+
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]
|
| 334 |
+
|
| 335 |
+
5. If you used crowdsourcing or conducted research with human subjects...
|
| 336 |
+
|
| 337 |
+
(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
|
| 338 |
+
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
|
| 339 |
+
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
|
acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:053a6e782cb0f9ba58ec729efd54437ce29855d0db689be0778d7e9ab2f1837f
|
| 3 |
+
size 551567
|
acceleratedtrainingofphysicsinformedneuralnetworkspinnsusingmeshlessdiscretizations/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:66179b9d77f1eb9cec65000c13c0e98c316265845d992d60b336a1fc278c764b
|
| 3 |
+
size 522603
|
acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:16fccf9da5364717128eed11752455b474133d66c0a7dafe7c3fa8f8f569e7e7
|
| 3 |
+
size 80239
|
acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9b21bde5c7fa09cb8c36698a30735f055e2299b72c453144d39c71596caecacf
|
| 3 |
+
size 100761
|
acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/f9bcbb19-37bc-4d36-afdb-60a0b24ca6d5_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8e299fd6d3bea02820b0049a4a1fda00bb21f81fd832d26f9f86b7dcc518cbe6
|
| 3 |
+
size 387111
|
acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/full.md
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Accelerating Certified Robustness Training via Knowledge Transfer
|
| 2 |
+
|
| 3 |
+
Pratik Vaishnavi
|
| 4 |
+
|
| 5 |
+
Stony Brook University
|
| 6 |
+
|
| 7 |
+
pvaishnavi@cs.stonybrook.edu
|
| 8 |
+
|
| 9 |
+
Kevin Eykholt
|
| 10 |
+
|
| 11 |
+
IBM Research
|
| 12 |
+
|
| 13 |
+
kheykholt@ibm.com
|
| 14 |
+
|
| 15 |
+
Amir Rahmati
|
| 16 |
+
|
| 17 |
+
Stony Brook University
|
| 18 |
+
|
| 19 |
+
amir@cs.stonybrook.edu
|
| 20 |
+
|
| 21 |
+
# Abstract
|
| 22 |
+
|
| 23 |
+
Training deep neural network classifiers that are certifiably robust against adversarial attacks is critical to ensuring the security and reliability of AI-controlled systems. Although numerous state-of-the-art certified training methods have been developed, they are computationally expensive and scale poorly with respect to both dataset and network complexity. Widespread usage of certified training is further hindered by the fact that periodic retraining is necessary to incorporate new data and network improvements. In this paper, we propose Certified Robustness Transfer (CRT), a general-purpose framework for reducing the computational overhead of any certifiably robust training method through knowledge transfer. Given a robust teacher, our framework uses a novel training loss to transfer the teacher's robustness to the student. We provide theoretical and empirical validation of CRT. Our experiments on CIFAR-10 show that CRT speeds up certified robustness training by $8 \times$ on average across three different architecture generations while achieving comparable robustness to state-of-the-art methods. We also show that CRT can scale to large-scale datasets like ImageNet.
|
| 24 |
+
|
| 25 |
+
# 1 Introduction
|
| 26 |
+
|
| 27 |
+
Deep Neural Networks (DNNs) are susceptible to adversarial evasion attacks [31, 9], which add a small amount of carefully crafted imperceptible noise to an input to reliably trigger misclassification. As a defense, numerous training methods have been proposed [25, 40, 35] to grant empirical robustness to a DNN. But in the absence of any provable guarantees for this robustness, these defenses were frequently broken [1, 32]. These failures have motivated the development of training methods that grant certifiable/provable robustness to a classifier, hence safeguarding it against all attacks (known or unknown) within a pre-determined threat model. Such methods are broadly categorized as either deterministic or probabilistic [23]. Deterministic robustness training methods [12, 26, 33, 34, 28, 10, 41, 30] rely on computing provable bounds on the output neurons of a classifier for a given perturbation budget in the input space. However, the deterministic robustness guarantees provided by these methods come at a high computational cost. Probabilistic robustness training methods address this limitation by providing highly probable (e.g., with 0.99 probability) robustness guarantees at a greatly reduced computational cost. Within this category, randomized smoothing-based methods [19, 3, 29, 22, 20, 7, 37, 39, 16, 15] are considered the state-of-the-art for certifiable robustness in the $\ell_2$ -space. Even so, these training methods remain an order of magnitude slower than standard training. In commercial applications where constant model re-deployment occurs to provide improvements (see Figure 1), re-training using computationally expensive methods is burdensome.
|
| 28 |
+
|
| 29 |
+

|
| 30 |
+
Figure 1: Evolution of DNN architectures on the ImageNet dataset. We plot the performance (top-1 accuracy) and the number of parameters of a few popular architectures (year of release is noted in brackets). Newer generations attempt to improve performance and/or reduce network parameters.
|
| 31 |
+
|
| 32 |
+
In this work, we reduce the training overhead of randomized smoothing-based robustness training methods with minimal impact on the robustness achieved. We propose Certified Robustness Transfer (CRT), a knowledge transfer framework that significantly speeds up the process of training $\ell_2$ certifiably robust image classifiers. Given a pre-trained classifier that is certifiably robust (i.e., teacher), CRT trains a new classifier (i.e., student) that has comparable levels of robustness in a fraction of the time required by state-of-the-art methods. CRT brings down the cost of training certifiably robust image classifiers to be comparable to standard training while preserving state-of-the-art robustness. On CIFAR-10, CRT speeds up training by an average of $8\times$ across three different architecture generations compared to a state-of-the-art robustness training method [15]. Furthermore, we show that state-of-the-art robustness training is only necessary to train the initial classifier. Afterward, CRT can be continuously reused to transfer robustness in order to expedite future model re-deployments and greatly reduce costs associated with computational resources. Our contributions can be summarized as follows:
|
| 33 |
+
|
| 34 |
+
- We present Certified Robustness Transfer (CRT), the first framework, to our knowledge, that can transfer the robustness of a certifiably robust teacher classifier to a new student classifier. CRT greatly reduces the time required to train certifiably robust image classifiers relative to existing state-of-the-art methods while achieving comparable or better robustness.
|
| 35 |
+
- We provide a theoretical understanding of CRT, showing how our approach of matching outputs enables robustness transfer between the student and teacher irrespective of the certified robustness training method used to train the teacher.
|
| 36 |
+
- On CIFAR-10, we show that CRT trains certifiably robust classifiers on average $8 \times$ faster than a state-of-the-art method while having comparable or better Average Certified Radius (by $8\%$ in the best case). Furthermore, CRT reduces the cumulative computational cost of training three classifiers by $87.84\%$ .
|
| 37 |
+
- We also show that CRT can be reused in a recursive manner, thus supporting a continuous redeployment scenario (e.g., in commercial applications). Finally, we show that CRT remains effective on a large-scale dataset, ImageNet.
|
| 38 |
+
|
| 39 |
+
# 2 Background
|
| 40 |
+
|
| 41 |
+
In this section, we briefly introduce certified robustness and discuss notable existing methods for training certifiably robust image classifiers using randomized smoothing.
|
| 42 |
+
|
| 43 |
+
# 2.1 Preliminaries
|
| 44 |
+
|
| 45 |
+
Problem Setup. Consider a neural network classifier $f$ parameterized by $\theta$ (denoted $f_{\theta}$) trained to map a given input $x \in \mathbb{R}^d$ to a set of discrete labels $\mathcal{Y}$ using a set of i.i.d. samples $S = \{(x_1, y_1), (x_2, y_2), \dots, (x_n, y_n)\}$ drawn from a data distribution $\mathcal{D}$. The output of the classifier can be written as $f_{\theta}(x) = \arg \max_{c \in \mathcal{Y}} z_{\theta}^{c}(x)$. Here $z_{\theta}(x)$ is the softmax output of the classifier and $z_{\theta}^{c}(x)$ denotes the probability that image $x$ belongs to class $c$.
|
| 46 |
+
|
| 47 |
+
Certified Robustness via Randomized Smoothing. The robustness of the classifier $f_{\theta}$ for a given input pair $(x, y)$ is defined using the radius of the largest $\ell_2$ ball centered at $x$ within which $f_{\theta}$ has the constant output $y$. This radius is referred to as the robust radius and can be expressed mathematically as:
|
| 48 |
+
|
| 49 |
+
$$
|
| 50 |
+
R(f_{\theta}; x, y) = \begin{cases} \inf_{f_{\theta}(x') \neq f_{\theta}(x)} \|x' - x\|_2, & \text{when } f_{\theta}(x) = y \\ 0, & \text{when } f_{\theta}(x) \neq y \end{cases} \tag{1}
|
| 51 |
+
$$
|
| 52 |
+
|
| 53 |
+
Within this $\ell_2$ -neighborhood of $x$, $f_{\theta}$ is considered to be certifiably robust. Therefore, to improve the robustness of a classifier, one needs to maximize this robust radius for any point sampled from the given data distribution. Directly maximizing the robust radius of a DNN classifier is an NP-hard problem [17]. Therefore, several prior works attempt to derive a lower bound for the robust radius [21, 19, 3]. This lower bound, often termed the certified radius, satisfies the following condition: $0 \leq CR(f_{\theta}; x, y) \leq R(f_{\theta}; x, y)$, for any $f_{\theta}$, $(x, y)$. In this paper, we utilize the certified robustness framework derived by Cohen et al. [3] using randomized smoothing. Given a classifier $f_{\theta}$, they first define the smooth classifier $g_{\theta}$ as:
|
| 54 |
+
|
| 55 |
+
Definition 2.1. For a given (base) classifier $f_{\theta}$ and $\sigma > 0$ , the smooth classifier $g_{\theta}$ corresponding to $f_{\theta}$ is defined as follows:
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
g_{\theta}(x) = \underset{c \in \mathcal{Y}}{\arg\max}\, P_{\eta \sim \mathcal{N}(0, \sigma^2 I)}\left(f_{\theta}(x + \eta) = c\right) \tag{2}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
Simply put, $g_{\theta}$ returns the class $c$ that has the highest probability mass under the Gaussian distribution $\mathcal{N}(x, \sigma^2 I)$. Using Theorem 2.2, they proved that if the smooth classifier correctly classifies a given input $x$, then it is certifiably robust at $x$. They also provided an analytical form of the $\ell_2$ certified radius at $x$.
|
| 62 |
+
|
| 63 |
+
Theorem 2.2. Let $f_{\theta} : \mathbb{R}^d \mapsto \mathcal{Y}$ be a classifier and $g_{\theta}$ be its smoothed version (as defined in Definition 2.1). For a given input $x \in \mathbb{R}^d$ and corresponding ground truth $y \in \mathcal{Y}$, if $g_{\theta}$ correctly classifies $x$ as $y$, i.e.,
|
| 64 |
+
|
| 65 |
+
$$
|
| 66 |
+
P_{\eta}\left(f_{\theta}(x + \eta) = y\right) \geq \max_{y' \neq y} P_{\eta}\left(f_{\theta}(x + \eta) = y'\right) \tag{3}
|
| 67 |
+
$$
|
| 68 |
+
|
| 69 |
+
then $g_{\theta}$ is provably robust at $x$ within the certified radius $CR$ given by:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
CR(g_{\theta}; x, y) = \frac{\sigma}{2}\left[\Phi^{-1}\left(P_{\eta}\left(f_{\theta}(x + \eta) = y\right)\right) - \Phi^{-1}\left(\max_{y' \neq y} P_{\eta}\left(f_{\theta}(x + \eta) = y'\right)\right)\right] \tag{4}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
where $\Phi$ is the c.d.f. of the standard Gaussian distribution.
|
| 76 |
+
|
| 77 |
+
This certified radius is a tight lower bound of the robust radius defined in Equation 1, i.e., it is impossible to certify $g_{\theta}$ at $x$ for a radius larger than $CR$ .
|
| 78 |
+
|
| 79 |
+
# 2.2 Training Methods for Maximizing Certified Radius
|
| 80 |
+
|
| 81 |
+
In addition to the theoretical framework discussed above, Cohen et al. [3] also propose a simple yet effective method for training the base classifier in a way that maximizes the $\ell_2$ certified radius of the smooth classifier, as expressed in Equation 4. We include an evaluation of their method in Appendix ??. Following their work, several other works build upon the randomized smoothing framework and propose training methods that better maximize the $\ell_2$ certified radius of the smooth classifier. Salman et al. [29] proposed combining adversarial training [25] with randomized smoothing (called SmoothAdv). They adapted the vanilla PGD attack to target the smooth classifier $g_{\theta}$ instead of the base classifier $f_{\theta}$. Zhai et al. [39] proposed a new robustness loss, a hinge loss that enforces maximization of a soft approximation of the certified radius. Their method (called MACER) is faster than SmoothAdv as it does not use adversarial training. More recently, Jeong et al. [15] proposed training with a convex combination of samples along the direction of adversarial perturbation for each input to regularize over-confident predictions. Their method (called SmoothMix) is the current state-of-the-art in the domain of $\ell_2$ certified robust image classifiers. Finally, we note the Consistency regularization method proposed by Jeong and Shin [16], which adds a regularization loss to existing methods that helps better maximize the certified radius.
|
| 82 |
+
|
| 83 |
+
Table 1: Training on CIFAR-10 using a ResNet110 classifier on a single Nvidia V100 GPU. State-of-the-art robustness training methods significantly slow down training compared to standard training.
|
| 84 |
+
|
| 85 |
+
<table><tr><td>METHOD</td><td>TRAINING SLOWDOWN FACTOR</td></tr><tr><td>SMOOTHADV</td><td>46.20×</td></tr><tr><td>MACER</td><td>20.86×</td></tr><tr><td>SMOOTHMIX</td><td>4.97×</td></tr></table>
|
| 86 |
+
|
| 87 |
+
# 3 Maximizing Certified Radius via Knowledge Transfer
|
| 88 |
+
|
| 89 |
+
Although prior works have proposed methods for increasing the certified radius of the smooth classifier, their training overhead is significant, making them much slower than standard training. As we show in Table 1, training a certifiably robust ResNet110 classifier to convergence using SmoothAdv, MACER, and SmoothMix is $46.20\times$, $20.86\times$, and $4.97\times$ slower, respectively, compared to training a non-robust classifier with standard training.
|
| 90 |
+
|
| 91 |
+
Given constant innovation in architecture design (Figure 1) and the influx of new data, deployed networks require periodic retraining; the large overhead of state-of-the-art robustness training methods therefore makes preserving certified robustness across model re-deployments difficult. We thus propose Certified Robustness Transfer (CRT), a training method that improves the usability of certified robustness training methods by dramatically reducing their training overhead while preserving certified robustness. Given the base classifier of a pre-trained certifiably robust smooth classifier, we leverage the knowledge transfer framework to guide the training of a new base classifier (and its associated robust smooth classifier). In this section, we describe our method and provide theoretical justification for its effectiveness.
|
| 92 |
+
|
| 93 |
+
# 3.1 Transferring Certified Robustness
|
| 94 |
+
|
| 95 |
+
From Equation 4, it follows that training the base classifier to maximize $P_{\eta}(f_{\theta}(x + \eta) = y)$ for any given input $x$ will result in the maximization of the certified radius associated with the smooth classifier, provided Equation 3 is satisfied. Thus, for the base classifier $f_{\theta}$, our goal is to maximize the following quantity over the training set:
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\sum_{i=1}^{n} \mathbb{E}_{\eta}\, \mathbf{1}\left[f_{\theta}(x_i + \eta) = y_i\right] \approx \sum_{i=1}^{n} \mathbb{E}_{\eta}\left[z_{\theta}^{y_i}(x_i + \eta)\right] \tag{5}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
In the above equation, like prior works [3, 29, 39], we leverage the fact that the softmax output of a classifier can be treated as a continuous and differentiable approximation of its arg max output. Methods like SmoothAdv [29], MACER [39] and SmoothMix [15] that target $\ell_2$ certifiable robustness propose training objectives that maximize this term.
|
| 102 |
+
|
| 103 |
+
Now, suppose we have a pre-trained base classifier $f_{\phi}$. Since softmax outputs are nonnegative, $\mathbb{E}_{\eta}[z_{\phi}^{y}(x + \eta)] \geq 0$. Through straightforward algebraic manipulations (see Appendix ??), we derive the following lower bound:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\sum_{i=1}^{n} \mathbb{E}_{\eta}\left[z_{\theta}^{y_i}(x_i + \eta)\right] \geq -\sum_{i=1}^{n} \mathbb{E}_{\eta}\left[z_{\phi}^{y_i}(x_i + \eta) - z_{\theta}^{y_i}(x_i + \eta)\right] \tag{6}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
That is, for a given input $x_{i}$, if we minimize the difference between the softmax outputs of the teacher and the student ($f_{\phi}$ and $f_{\theta}$) corresponding to the correct label $y_{i}$, we maximize Equation 5 for the student. However, to ensure that the student has a non-trivial certified radius, we must also ensure that Equation 3 is satisfied. If we assume that Equation 3 holds for the teacher (i.e., the base classifier of a certifiably robust smooth classifier), this condition can also be achieved for the student by matching the overall softmax output of the student to that of the teacher.
|
| 110 |
+
|
| 111 |
+
# 3.2 Certified Robustness Transfer (CRT)
|
| 112 |
+
|
| 113 |
+
Based on the previous discussion, we now describe our method for training a certifiably robust classifier through knowledge transfer. First, we obtain a pre-trained base classifier $f_{\phi}$ that has been trained using a randomized smoothing-based robustness training method, as this maximizes
|
| 114 |
+
|
| 115 |
+
Algorithm 1 Certified Robustness Transfer (CRT)
|
| 116 |
+
1: Input: Training data distribution $\mathcal{D}$ , certifiably robust teacher base classifier $f_{\phi}$ , noise level $\sigma$ , total training iterations $\mathcal{T}$ , learning rate $\alpha$
|
| 117 |
+
2: Output: Certifiably robust student base classifier $f_{\theta}$
|
| 118 |
+
3: $\theta \gets$ random initialization
|
| 119 |
+
4: $i \gets 0$
|
| 120 |
+
5: while $i < \mathcal{T}$ do
|
| 121 |
+
6: From $\mathcal{D}$ , sample a batch of inputs $\{x_1, x_2, \dots, x_n\}$ .
|
| 122 |
+
7: From $\mathcal{N}(0, \sigma^2 I)$ , generate a batch of Gaussian noise samples $\{\eta_1, \eta_2, \dots, \eta_n\}$ .
|
| 123 |
+
8: $l_i \gets \frac{1}{n} \sum_{j=1}^{n} \|z_{\phi}(x_j + \eta_j) - z_{\theta}(x_j + \eta_j)\|_2$
|
| 124 |
+
9: $\theta \gets \theta - \alpha \cdot \nabla_\theta l_i$
|
| 125 |
+
10: $i \gets i + 1$
|
| 126 |
+
11: end while
|
| 127 |
+
|
| 128 |
+
$\mathbb{E}_{\eta}[z_{\phi}^{y}(x + \eta)]$. Next, we use $f_{\phi}$ as a teacher to train a new student base classifier $f_{\theta}$. The student is trained to match the output of the teacher. In doing so, we maximize the certified radius of the associated smooth classifier $g_{\theta}$ (Equation 6) and ensure that Equation 3 is satisfied. We describe our implementation in Algorithm 1. Given a batch of inputs, we first perturb them with additive Gaussian noise. Next, we compute the $\ell_2$ distance between the student's and the teacher's outputs for these Gaussian-perturbed inputs. This distance serves as our loss function, and we update the parameters of the student to minimize this loss. At test time, the classifier $f_{\theta}$ is converted to its smooth version $g_{\theta}$ following Definition 2.1.
|
| 129 |
+
|
| 130 |
+
# 3.3 Prior Works on Robustness Transfer
|
| 131 |
+
|
| 132 |
+
Several prior works have examined transferring adversarial robustness between classifiers, but these works have been limited to transferring empirical rather than certified robustness [2, 8, 14, 42, 43]. Of note is the work by Goldblum et al. [8], in which they combine adversarial training [25] with knowledge distillation [13]. They show that distilling knowledge from a large network to a small network improves the small network's empirical robustness compared to training it on its own, but their method makes no effort to reduce the computational cost of adversarial training.
|
| 133 |
+
|
| 134 |
+
# 4 Evaluation
|
| 135 |
+
|
| 136 |
+
Our goal is to improve the usability of randomized smoothing-based robustness training methods. In this section, we demonstrate how CRT enables the reuse of an existing certifiably robust classifier to train new certifiably robust classifiers at significantly reduced training cost compared to prior methods. In our first experiment, we train a ResNet110 classifier with a state-of-the-art method (SmoothMix [15]) and use CRT to transfer its robustness to several newer generation classifiers. In a second experiment, we recursively use CRT to train a newer generation classifier using the previous generation classifier that was itself trained using CRT. In each experiment, we compare the certified robustness of classifiers trained using CRT against classifiers trained using SmoothMix (Section 4.1). We find that classifiers trained using CRT are similarly robust to those trained using SmoothMix but require only a fraction of the training time (Section 4.2). Our main results are generated using the CIFAR-10 dataset [18], but we also demonstrate the effectiveness of CRT on ImageNet [5] (Section 5.3). Both datasets are open-source and free for non-commercial use.
|
| 137 |
+
|
| 138 |
+
Architectures. We use several popular DNN architectures that were proposed to either improve upon the visual recognition performance of the previous generation architectures or preserve performance while requiring significantly fewer parameters (or both). For the CIFAR-10 experiments, we use ResNet110 [11], ResNeXt29-2x64d [36], DLA [38], and RegNetX_200MF [27].<sup>2</sup>
|
| 139 |
+
|
| 140 |
+
Training details. All SmoothMix classifiers were trained using the code made available by the authors<sup>3</sup> and the hyperparameters reported by them [15]. All CRT classifiers were trained using stochastic gradient descent until convergence (200 epochs), with a batch size of 128. Further hyperparameter details are available in Appendix ??. Unless specified otherwise, we report results for noise
|
| 141 |
+
|
| 142 |
+
level $\sigma = 0.25$ in the main paper. Additional results for higher noise levels $\sigma = 0.5$ and $1.0$ are reported in Appendix ??.
|
| 143 |
+
|
| 144 |
+
Evaluation Metrics. We report our results using two metrics. First, as done in prior work, we measure the certified robustness of a classifier based on (1) the certified test accuracy at $\ell_2$ radius $r$<sup>4</sup>, which is defined as the fraction of test set inputs that the smooth classifier classifies correctly within an $\ell_2$ ball of radius $r$ centered at each input, and (2) the average certified radius (ACR), which is the average of the certified radius across all inputs in the test set:
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
\mathrm{ACR}(g_{\theta}) = \frac{1}{n_{\mathrm{test}}} \sum_{i=1}^{n_{\mathrm{test}}} CR(g_{\theta}; x_i, y_i)
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
On CIFAR-10, we compute these metrics using the entire test set. Second, we measure training time of a classifier based on the per-epoch time and total training time. The total training time is computed once the model's loss has converged. All classifiers were trained on the same machine with a single Nvidia Titan V GPU.
|
| 151 |
+
|
| 152 |
+
# 4.1 Certified Robustness Comparison
|
| 153 |
+
|
| 154 |
+
Standard CRT Training. Given a ResNet110 classifier trained using SmoothMix, we transfer its robustness to several newer generation classifiers. We compare the certified robustness of these classifiers with their SmoothMix-trained versions. The results are summarized in Table 2. We observe that using CRT does not reduce the certified robustness of the trained classifier compared to training with SmoothMix. Interestingly, CRT-trained classifiers exhibit higher certified robustness than their SmoothMix baselines: not only do they have higher ACR (an improvement of $8.1\%$ in the best case), they also exhibit higher certified accuracy at different $\ell_2$ radii. Furthermore, CRT remains effective even as the generation gap between the student and the teacher increases. This implies that the same teacher can potentially be reused indefinitely, amortizing the teacher's training cost to a constant. These results empirically validate our theoretical justification of CRT. Finally, we note that in Table 2, the accuracy on clean inputs and the ACR of CRT-trained classifiers follow the same trend as in Figure 1, thus motivating the need for periodic model re-deployment to incorporate architectural improvements.
|
| 155 |
+
|
| 156 |
+
Table 2: The certified robustness of classifiers with different architectures trained on CIFAR-10 using SmoothMix [15] and CRT. We use CRT to transfer the robustness of a ResNet110 trained using SmoothMix. We report certified test accuracy at different values of $\ell_2$ radius and the Average Certified Radius (ACR). The architectures are sorted chronologically by publication date. The noise level $\sigma$ is set to 0.25.
|
| 157 |
+
|
| 158 |
+
<table><tr><td>ARCHITECTURE</td><td>0.00</td><td>0.25</td><td>0.50</td><td>0.75</td><td>ACR</td></tr><tr><td colspan="6">SMOOTHMIX [15]</td></tr><tr><td>RESNET110 [11]</td><td>76.89</td><td>68.25</td><td>57.42</td><td>46.26</td><td>0.550</td></tr><tr><td>RESNEXT29-2x64D [36]</td><td>75.98</td><td>65.40</td><td>53.78</td><td>41.03</td><td>0.516</td></tr><tr><td>DLA [38]</td><td>77.72</td><td>68.53</td><td>57.69</td><td>45.56</td><td>0.551</td></tr><tr><td>REGNETX_200MF [27]</td><td>76.48</td><td>66.79</td><td>56.36</td><td>44.47</td><td>0.538</td></tr><tr><td colspan="6">CRT (RESNET110 TEACHER)</td></tr><tr><td>RESNEXT29-2x64D [36]</td><td>77.57</td><td>69.00</td><td>58.31</td><td>47.16</td><td>0.558</td></tr><tr><td>DLA [38]</td><td>77.31</td><td>68.91</td><td>58.26</td><td>46.34</td><td>0.554</td></tr><tr><td>REGNETX_200MF [27]</td><td>77.89</td><td>69.57</td><td>59.36</td><td>47.28</td><td>0.564</td></tr></table>
|
| 159 |
+
|
| 160 |
+
Recursive CRT Training. We now explore the effectiveness of CRT when it is used recursively, i.e., when the newest generation is trained using a CRT-trained classifier from the previous generation as the teacher. We begin with a ResNet110 trained using SmoothMix; all subsequent classifiers are then trained using CRT recursively, and the results are reported in Table 3. The chain length measures the number of times CRT was used. For example, the DLA network, with a chain length of 2, is the result of using CRT
|
| 161 |
+
|
| 162 |
+
Table 3: The certified robustness of classifiers with different architectures trained on CIFAR-10 using CRT recursively. We report certified test accuracy at different values of $\ell_2$ radius and the Average Certified Radius (ACR). Here, the previous generation classifier is used to train the current generation one. Chain length represents the number of times CRT was used in training. The noise level $\sigma$ is set to 0.25. CRT remains effective despite recursive use.
|
| 163 |
+
|
| 164 |
+
<table><tr><td>ARCHITECTURE</td><td>CHAIN LENGTH</td><td>0.00</td><td>0.25</td><td>0.50</td><td>0.75</td><td>ACR</td></tr><tr><td>RESNEXT29-2X64D [36]</td><td>1</td><td>77.57</td><td>69.00</td><td>58.31</td><td>47.16</td><td>0.558</td></tr><tr><td>DLA [38]</td><td>2</td><td>78.46</td><td>70.05</td><td>60.01</td><td>48.30</td><td>0.570</td></tr><tr><td>REGNETX_200MF [27]</td><td>3</td><td>78.16</td><td>69.00</td><td>58.69</td><td>47.00</td><td>0.559</td></tr></table>
|
| 165 |
+
|
| 166 |
+
twice: once to transfer the SmoothMix-trained ResNet110 network's performance to the ResNeXt29-2x64d network and once to transfer the CRT-trained ResNeXt29-2x64d network's performance to the DLA network. We observe that the certified robustness of the resulting classifiers remains high even with recursive use of CRT. The empirical results are to be expected given our theoretical understanding of CRT: in order to train a robust student, we only require that the teacher is already robust (i.e., satisfies the condition of Theorem 2.2), irrespective of the training method used to achieve robustness. Thus, we expect CRT to remain effective even at longer chain lengths. In Section 5.2, we will highlight the relationship between the teacher's training method and the robustness of a CRT-trained student.
|
| 167 |
+
|
| 168 |
+
# 4.2 Training Time Comparison
|
| 169 |
+
|
| 170 |
+
Having established that CRT effectively transfers certified robustness between classifiers, we now evaluate its training overhead. For comparison, we also evaluate the training overhead of SmoothMix. In Table 4, we report the per-epoch time and total time of training different architectures with each method. For brevity, we only compare the training time for standard CRT.<sup>5</sup> We observe that the per-epoch time of CRT is significantly lower than that of SmoothMix. Similarly, when trained until convergence, the total training time of CRT is significantly lower. Across the three architectures on which we run our experiments, CRT achieves an average epoch time speedup of $10.75\times$. Comparing overall training times, CRT speeds up training by, on average, $8.06\times$. If we consider the real-world scenario where the model has to be periodically redeployed to incorporate architectural improvements, the cumulative training time using SmoothMix is 96.21 hours, as each new architecture is trained from scratch. With CRT, the cumulative time is reduced to 11.70 hours, representing an $87.84\%$ savings in costs associated with computational resources.
|
| 171 |
+
|
| 172 |
+
Teacher's availability. So far, we have assumed the availability of a certifiably robust teacher (ResNet110). We argue that this is a reasonable assumption, as the amortized cost associated with the one-time training of a robust teacher is negligible across many generations of the model. Regardless, in Section 5.1, we examine a scenario where the teacher is unavailable. Under this scenario, we demonstrate how CRT can be used to speed up the training of a ResNet110 for use as the teacher.
|
| 173 |
+
|
| 174 |
+
Table 4: Training time statistics for SmoothMix and CRT. We report the mean and $95\%$ confidence interval computed over all training epochs. CRT is on average $8\times$ faster than SmoothMix across all three architectures.
|
| 175 |
+
|
| 176 |
+
<table><tr><td rowspan="2">ARCHITECTURE</td><td colspan="2">SMOOTHMIX [15]</td><td colspan="2">CRT (RESNET110 TEACHER)</td></tr><tr><td>EPOCH TIME (S)</td><td>TOTAL TIME (H)</td><td>EPOCH TIME (S)</td><td>TOTAL TIME (H)</td></tr><tr><td>RESNET110 [11]</td><td>455.55 ± 1.17</td><td>18.98</td><td>-</td><td>-</td></tr><tr><td>RESNEXT29-2x64D [36]</td><td>1085.09 ± 0.50</td><td>45.21</td><td>86.41 ± 0.11</td><td>4.80</td></tr><tr><td>DLA [38]</td><td>854.41 ± 0.09</td><td>35.60</td><td>62.24 ± 0.40</td><td>3.46</td></tr><tr><td>REGNETX_200MF [27]</td><td>369.42 ± 0.51</td><td>15.39</td><td>61.92 ± 0.30</td><td>3.44</td></tr></table>
|
| 177 |
+
|
| 178 |
+
# 5 Discussion
|
| 179 |
+
|
| 180 |
+
In this section, we address the main outstanding concerns about CRT. The section layout is as follows: in Section 5.1, we discuss the scenario in which a certifiably robust teacher is not readily available and demonstrate how CRT can still speed up robustness training; in Section 5.2, we examine how the method used to train the teacher affects the robustness of the student; in Section 5.3, we study the scalability of CRT using the ImageNet dataset; in Section 5.4, we compare CRT with a closely related prior work on fast certified robustness training, i.e., Consistency regularization [16]; in Section 5.5, we discuss the limitations of CRT; in Section 5.6, we address the broader impact of CRT.
|
| 181 |
+
|
| 182 |
+
# 5.1 Teacher Not Available
|
| 183 |
+
|
| 184 |
+
We designed CRT under the assumption that a certifiably robust teacher is already available. However, even if a certifiably robust teacher is not available, CRT can still speed up training. Given a certifiably robust training method and a large network architecture, we can reduce the training overhead by robustly training a comparatively smaller network first. Then, we can use CRT to transfer the robustness of the small network to the larger network. In Table 5, we present results for such a process. First, we trained a ResNet20 network using SmoothMix; then, we used CRT to train a ResNet110 network. We compare the robustness of a ResNet110 trained using this process with one trained using SmoothMix. As we can see, the CRT ResNet110 network has robustness comparable to the SmoothMix ResNet110 network. Moreover, even when adding the teacher and student training times, CRT still speeds up training by approximately $2\times$ relative to SmoothMix.
|
| 185 |
+
|
| 186 |
+
Table 5: Certified robustness and total time of a ResNet110 classifier trained on CIFAR-10 using SmoothMix and CRT. For CRT, we train a ResNet20 teacher first using SmoothMix and report total time as the time taken to train the teacher and the student. The noise level $\sigma$ is set to 0.25. The ResNet110 trained using CRT achieves an ACR comparable to the SmoothMix ResNet110 while achieving a $\sim 2\times$ speedup in total training time.
|
| 187 |
+
|
| 188 |
+
<table><tr><td>METHOD</td><td>0.00</td><td>0.25</td><td>0.50</td><td>0.75</td><td>ACR</td><td>TOTAL TIME (H)</td></tr><tr><td>SMOOTHMIX [15]</td><td>76.89</td><td>68.25</td><td>57.42</td><td>46.26</td><td>0.550</td><td>18.98</td></tr><tr><td>CRT (RESNET20 TEACHER)</td><td>75.68</td><td>67.20</td><td>56.30</td><td>44.83</td><td>0.540</td><td>10.07</td></tr></table>
|
| 189 |
+
|
| 190 |
+
# 5.2 Teacher Training Method
|
| 191 |
+
|
| 192 |
+
We train a ResNet20 classifier using MACER [39], SmoothAdv [29], and SmoothMix [15]. For MACER and SmoothAdv training, we use the code made available by the authors<sup>6,7</sup> and the hyperparameters reported by them. Using CRT, we transfer the robustness of each teacher to a ResNet110 classifier. The results are reported in Table 6. For reference, we also report the robustness of a ResNet110 network trained independently using the chosen robustness training methods. Overall, we observe a slight variation in the robustness of the CRT-trained networks depending on the teacher's training method. Based on Equation 6, this is expected, as maximizing the teacher's performance will in turn maximize the performance of the student. Our empirical results align with this reasoning: the MACER teacher was the least robust of the three, and its student is similarly the least robust of the students. However, in all cases, the CRT-trained network obtained certified robustness comparable to its teacher.
|
| 193 |
+
|
| 194 |
+
# 5.3 Scalability
|
| 195 |
+
|
| 196 |
+
Here, we study the effectiveness of CRT on a large-scale dataset, i.e., ImageNet. For this purpose, we train ResNet18 classifiers using three certified robustness training methods (MACER, SmoothAdv, and SmoothMix). Next, we transfer their robustness to a ResNet50 classifier. The results were generated on a 500-sample test set (following prior works [29, 39, 15]) and are summarized in Table 7. For reference, we also report the robustness of a ResNet50 network trained independently using the chosen robustness training methods. In all cases, we observe that students achieve certified robustness comparable to their respective teachers. Therefore, CRT remains effective even on a more complex dataset.
|
| 197 |
+
|
| 198 |
+
Table 6: For the CIFAR-10 dataset, certified robustness achieved when training the CRT teacher (ResNet20) with different methods. The student classifier is ResNet110. For reference, we also report the robustness of a ResNet110 trained independently using the chosen methods. The noise level $\sigma$ is set to 0.25. Students attain ACR comparable to their respective teachers.
|
| 199 |
+
|
| 200 |
+
<table><tr><td colspan="2">TEACHER (RESNET20)</td><td colspan="2">STUDENT (RESNET110)</td></tr><tr><td>TRAINING METHOD</td><td>ACR</td><td>TRAINING METHOD</td><td>ACR</td></tr><tr><td>SMOOTHADV [29]</td><td>0.531</td><td>CRT</td><td>0.519</td></tr><tr><td>MACER [39]</td><td>0.507</td><td>CRT</td><td>0.528</td></tr><tr><td>SMOOTHMIX [15]</td><td>0.522</td><td>CRT</td><td>0.540</td></tr><tr><td rowspan="3" colspan="2">STUDENT TRAINED DIRECTLY</td><td>SMOOTHADV [29]</td><td>0.547</td></tr><tr><td>MACER [39]</td><td>0.531</td></tr><tr><td>SMOOTHMIX [15]</td><td>0.550</td></tr></table>
|
| 201 |
+
|
| 202 |
+
Table 7: ImageNet results using CRT and three robustness training methods. We report both the ACR of the ResNet18 teacher and its ResNet50 student. For reference, we also report robustness of ResNet50 trained independently using chosen methods. The noise level $\sigma$ is set to 0.5. Students attain comparable ACR to their respective teachers.
|
| 203 |
+
|
| 204 |
+
<table><tr><td colspan="2">TEACHER (RESNET18)</td><td colspan="2">STUDENT (RESNET50)</td></tr><tr><td>TRAINING METHOD</td><td>ACR</td><td>TRAINING METHOD</td><td>ACR</td></tr><tr><td>SMOOTHADV [29]</td><td>0.684</td><td>CRT</td><td>0.684</td></tr><tr><td>MACER [39]</td><td>0.574</td><td>CRT</td><td>0.576</td></tr><tr><td>SMOOTHMIX [15]</td><td>0.653</td><td>CRT</td><td>0.661</td></tr><tr><td rowspan="3" colspan="2">STUDENT TRAINED DIRECTLY</td><td>SMOOTHADV [29]</td><td>0.820</td></tr><tr><td>MACER [39]</td><td>0.653</td></tr><tr><td>SMOOTHMIX [15]</td><td>0.799</td></tr></table>
|
| 205 |
+
|
| 206 |
+
# 5.4 Comparison with Consistency Regularization [16]
|
| 207 |
+
|
| 208 |
+
In Section 4, we compared CRT against SmoothMix as it has state-of-the-art ACR. However, another closely related work was recently published by Jeong & Shin [16], which shows state-of-the-art ACR and potential training time improvements. They proposed a consistency regularization loss that improves the certified robustness of smooth classifiers by enforcing the base classifier's soft outputs to be consistent across multiple noisy copies of a given input. Therefore, their additional computational overhead scales linearly with the number of noisy samples used to compute the consistency loss. With respect to computational overhead, CRT adds only one forward pass, i.e., the pass through the teacher. When paired with Gaussian data augmentation training, their regularization loss significantly improves the certified robustness of a smooth classifier. By applying their regularization loss over only two noisy copies of the input, they can achieve better certified robustness than prior state-of-the-art robustness training methods like MACER [39] and SmoothAdv [29].
|
| 209 |
+
|
| 210 |
+
The key difference between CRT and consistency regularization is in the training overhead when combined with other state-of-the-art certified training methods. Consistency regularization augments classifier training with an additional loss term; therefore, its training overhead is dominated by the training method selected. In their experiments, the authors focused on Gaussian data augmentation, which adds little to no training overhead relative to standard training. However, they remark that if a more computationally intensive method were selected (e.g., MACER), their training overhead would dramatically increase. With respect to CRT, if a teacher is available (i.e., a previous generation model), the overhead of CRT is agnostic to the training method. If one is not available, we demonstrated in Section 5.1 that CRT can still greatly reduce training overhead. For interested readers, we include results for transferring robustness from a teacher trained using Consistency regularization in Appendix ??.
|
| 211 |
+
|
| 212 |
+
# 5.5 Limitations
|
| 213 |
+
|
| 214 |
+
In this paper, we use probabilistic certified robustness methods as they rely on Theorem 2.2 and, thus, are designed to maximize the certified radius (Equation 4). We found that deterministic methods (e.g., CROWN-IBP [41]) impose a stricter training requirement on the base teacher classifier. For a given input, deterministic training methods require the base classifier to be correct for all inputs within the $\ell_2$-norm ball, rather than simply being likely to correctly classify inputs within the $\ell_2$-norm ball. This restriction lowers the potential ACR of the smooth teacher classifier, which in turn lowers the ACR of the student trained using CRT. For example, when using CROWN-IBP [41] to train a ResNeXt base classifier, the ACR of the corresponding smooth classifier is only 0.064. When transferring the robustness of this ResNeXt classifier to a WideResNet34-10 student, we obtain an ACR of 0.065.
|
| 215 |
+
|
| 216 |
+
Additionally, we note that the classifier architectures we present in the paper are restricted to CNNs. Recently, a new class of transformer-based image classifiers [6, 24, 4] has been proposed that shows improved performance over CNN classifiers. We briefly studied the effectiveness of CRT when transferring robustness between CNN and transformer architectures using ViT [6] and present the results in Appendix ??, but further exploration is needed. Finally, CRT has only been studied using the $\ell_2$ norm and image data due to the limitations of current certified robustness training methods.
|
| 217 |
+
|
| 218 |
+
# 5.6 Broader Impacts
|
| 219 |
+
|
| 220 |
+
As we have shown, our work improves the efficiency of training certifiably robust classifiers, in an effort to improve the security of AI-powered systems. Beyond the broad negative societal impacts of machine learning, we are not aware of any impacts specific to our work.
|
| 221 |
+
|
| 222 |
+
# 6 Conclusion
|
| 223 |
+
|
| 224 |
+
In this paper, we proposed the first general-purpose framework to speed up the training of certifiably robust classifiers using knowledge transfer and randomized smoothing. Our proposed method, Certified Robustness Transfer (CRT), enables transferring the certified robustness of a classifier to another classifier at a cost comparable to standard training. We provided a theoretical understanding of CRT and empirical evidence of its effectiveness. On CIFAR-10, we showed that across several generations of classifier architectures, CRT trained classifiers $8\times$ faster than a state-of-the-art training method while achieving comparable or better certified robustness. Furthermore, CRT can reduce the training overhead of certified robustness training methods even when an initial robust classifier is not available. The use of machine learning in security- and safety-critical environments motivates a need for models with certifiably robust performance, but the training overhead of existing certified robustness training methods inhibits their usability. Our work addresses this issue, especially for commercial applications where periodic model re-deployment is inevitable.
|
| 225 |
+
|
| 226 |
+
# Acknowledgement
|
| 227 |
+
|
| 228 |
+
This work was supported by the Office of Naval Research under grants N00014-20-1-2858 and N00014-22-1-2001, Air Force Research Lab under grant FA9550-22-1-0029, and NVIDIA 2018 GPU Grant. Any opinions, findings, or conclusions expressed in this material are those of the authors and do not necessarily reflect the views of the sponsors.
|
| 229 |
+
|
| 230 |
+
# References

[1] Anish Athalye, Nicholas Carlini, and David Wagner. Obfuscated gradients give a false sense of security: Circumventing defenses to adversarial examples. In International Conference on Machine Learning, 2018.

[2] Alvin Chan, Yi Tay, and Yew-Soon Ong. What it thinks is important is important: Robustness transfers through input gradients. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020.

[3] Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning, 2019.

[4] Zihang Dai, Hanxiao Liu, Quoc V Le, and Mingxing Tan. CoAtNet: Marrying convolution and attention for all data sizes. In Advances in Neural Information Processing Systems, 2021.

[5] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2009.

[6] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020.

[7] Krishnamurthy Dj Dvijotham, Jamie Hayes, Borja Balle, Zico Kolter, Chongli Qin, Andras Gyorgy, Kai Xiao, Sven Gowal, and Pushmeet Kohli. A framework for robustness certification of smoothed classifiers using f-divergences. In International Conference on Learning Representations, 2019.

[8] Micah Goldblum, Liam Fowl, Soheil Feizi, and Tom Goldstein. Adversarially robust distillation. In AAAI Conference on Artificial Intelligence, 2020.

[9] Ian J Goodfellow, Jonathon Shlens, and Christian Szegedy. Explaining and harnessing adversarial examples. In International Conference on Learning Representations, 2014.

[10] Sven Gowal, Krishnamurthy Dj Dvijotham, Robert Stanforth, Rudy Bunel, Chongli Qin, Jonathan Uesato, Relja Arandjelovic, Timothy Mann, and Pushmeet Kohli. Scalable verified training for provably robust image classification. In IEEE/CVF International Conference on Computer Vision, pages 4842-4851, 2019.

[11] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2016.

[12] Matthias Hein and Maksym Andriushchenko. Formal guarantees on the robustness of a classifier against adversarial manipulation. In Advances in Neural Information Processing Systems, 2017.

[13] Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network, 2015. arXiv:1503.02531.

[14] Andrew Ilyas, Shibani Santurkar, Dimitris Tsipras, Logan Engstrom, Brandon Tran, and Aleksander Madry. Adversarial examples are not bugs, they are features. In Advances in Neural Information Processing Systems, 2019.

[15] Jongheon Jeong, Sejun Park, Minkyu Kim, Heung-Chang Lee, Do-Guk Kim, and Jinwoo Shin. SmoothMix: Training confidence-calibrated smoothed classifiers for certified robustness. In Advances in Neural Information Processing Systems, 2021.

[16] Jongheon Jeong and Jinwoo Shin. Consistency regularization for certified robustness of smoothed classifiers. In Advances in Neural Information Processing Systems, 2020.

[17] Guy Katz, Clark Barrett, David L Dill, Kyle Julian, and Mykel J Kochenderfer. Reluplex: An efficient SMT solver for verifying deep neural networks. In International Conference on Computer Aided Verification, pages 97-117. Springer, 2017.

[18] A Krizhevsky. Learning multiple layers of features from tiny images. Master's thesis, University of Toronto, 2009.

[19] Mathias Lecuyer, Vaggelis Atlidakis, Roxana Geambasu, Daniel Hsu, and Suman Jana. Certified robustness to adversarial examples with differential privacy. In IEEE Symposium on Security and Privacy, 2019.

[20] Guang-He Lee, Yang Yuan, Shiyu Chang, and Tommi Jaakkola. Tight certificates of adversarial robustness for randomly smoothed classifiers. In Advances in Neural Information Processing Systems, pages 4910-4921, 2019.

[21] Bai Li, Changyou Chen, Wenlin Wang, and Lawrence Carin. Second-order adversarial attack and certifiable robustness. URL https://openreview.net/forum, 2018.

[22] Bai Li, Changyou Chen, Wenlin Wang, and Lawrence Carin. Certified adversarial robustness with additive noise. In Advances in Neural Information Processing Systems, 2019.

[23] Linyi Li, Xiangyu Qi, Tao Xie, and Bo Li. SoK: Certified robustness for deep neural networks. arXiv preprint arXiv:2009.04131v2, 2020.

[24] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin Transformer: Hierarchical vision transformer using shifted windows. In IEEE/CVF International Conference on Computer Vision, pages 10012-10022, 2021.

[25] Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations, 2018.

[26] Matthew Mirman, Timon Gehr, and Martin Vechev. Differentiable abstract interpretation for provably robust neural networks. In International Conference on Machine Learning, pages 3578-3586. PMLR, 2018.

[27] Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, and Piotr Dollár. Designing network design spaces. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020.

[28] Aditi Raghunathan, Jacob Steinhardt, and Percy Liang. Certified defenses against adversarial examples. In International Conference on Learning Representations, 2018.

[29] Hadi Salman, Greg Yang, Jerry Li, Pengchuan Zhang, Huan Zhang, Ilya Razenshteyn, and Sébastien Bubeck. Provably robust deep learning via adversarially trained smoothed classifiers. In Advances in Neural Information Processing Systems, 2019.

[30] Sahil Singla and Soheil Feizi. Second-order provable defenses against adversarial attacks. In International Conference on Machine Learning, pages 8981-8991. PMLR, 2020.

[31] Christian Szegedy, Wojciech Zaremba, Ilya Sutskever, Joan Bruna, Dumitru Erhan, Ian Goodfellow, and Rob Fergus. Intriguing properties of neural networks. In International Conference on Learning Representations, 2014.

[32] Florian Tramer, Nicholas Carlini, Wieland Brendel, and Aleksander Madry. On adaptive attacks to adversarial example defenses. In Advances in Neural Information Processing Systems, 33, 2020.

[33] Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning, pages 5286-5295. PMLR, 2018.

[34] Eric Wong, Frank R Schmidt, Jan Hendrik Metzen, and J Zico Kolter. Scaling provable adversarial defenses. In Advances in Neural Information Processing Systems, pages 8410-8419, 2018.

[35] Cihang Xie, Yuxin Wu, Laurens van der Maaten, Alan L Yuille, and Kaiming He. Feature denoising for improving adversarial robustness. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 501-509, 2019.

[36] Saining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2017.

[37] Greg Yang, Tony Duan, J Edward Hu, Hadi Salman, Ilya Razenshteyn, and Jerry Li. Randomized smoothing of all shapes and sizes. In International Conference on Machine Learning, pages 10693-10705. PMLR, 2020.

[38] Fisher Yu, Dequan Wang, Evan Shelhamer, and Trevor Darrell. Deep layer aggregation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2018.

[39] Runtian Zhai, Chen Dan, Di He, Huan Zhang, Boqing Gong, Pradeep Ravikumar, Cho-Jui Hsieh, and Liwei Wang. MACER: Attack-free and scalable robust training via maximizing certified radius. In International Conference on Learning Representations, 2020.

[40] Hongyang Zhang, Yaodong Yu, Jiantao Jiao, Eric Xing, Laurent El Ghaoui, and Michael Jordan. Theoretically principled trade-off between robustness and accuracy. In International Conference on Machine Learning, 2019.

[41] Huan Zhang, Hongge Chen, Chaowei Xiao, Sven Gowal, Robert Stanforth, Bo Li, Duane Boning, and Cho-Jui Hsieh. Towards stable and efficient training of verifiably robust neural networks. In International Conference on Learning Representations, 2019.

[42] Jianing Zhu, Jiangchao Yao, Bo Han, Jingfeng Zhang, Tongliang Liu, Gang Niu, Jingren Zhou, Jianliang Xu, and Hongxia Yang. Reliable adversarial distillation with unreliable teachers. arXiv preprint arXiv:2106.04928, 2021.

[43] Bojia Zi, Shihao Zhao, Xingjun Ma, and Yu-Gang Jiang. Revisiting adversarial robustness distillation: Robust soft labels make student better. In IEEE/CVF International Conference on Computer Vision, pages 16443-16452, 2021.
# Checklist
1. For all authors...

   (a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes] See Section 4.

   (b) Did you describe the limitations of your work? [Yes] See Section 5.5.

   (c) Did you discuss any potential negative societal impacts of your work? [No] In this paper, we propose a method to improve the usability of certified robustness training methods. Beyond the broad negative societal impacts that result from machine learning in general, we are unaware of any specific to our work.

   (d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes] We have read the ethics review guidelines and acknowledge that our paper conforms to them.

2. If you are including theoretical results...

   (a) Did you state the full set of assumptions of all theoretical results? [Yes] See Section 2 for foundational theoretical results from prior work and Section 3 for the CRT-specific results.

   (b) Did you include complete proofs of all theoretical results? [Yes] The complete theoretical justification for CRT is included in Appendix ??. For proofs of other foundational results such as Theorem 2.2, see the respective paper.

3. If you ran experiments...

   (a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] See Appendix ??

   (b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] See Appendix ??

   (c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] We include $95\%$ confidence intervals for our timing results. However, computing error bars for certified accuracy and ACR requires training multiple classifiers and performing the costly process of certification multiple times. Additionally, we observed that most prior works do not compute such error bars.

   (d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [No] In Sections 4.2 and 5.1, we include the total training time results that support our claims, as well as the GPU model; however, we do not include such information for all experiments.

4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...

   (a) If your work uses existing assets, did you cite the creators? [Yes]

   (b) Did you mention the license of the assets? [Yes] When specified by the asset creator, we included the license.

   (c) Did you include any new assets either in the supplemental material or as a URL? [Yes] See Appendix ??

   (d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]

   (e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]

5. If you used crowdsourcing or conducted research with human subjects...

   (a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]

   (b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]

   (c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]

acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1d05e5f38671c1ca5b4ccabf494874f78c739a96549a670cd99d057f7e431df5
size 293814

acceleratingcertifiedrobustnesstrainingviaknowledgetransfer/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b8f2260a0c1acdf19198fac1eb241e614bdfeb0795ad4c50cdfd3e6a2bfeb526
size 421699

acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6eb133d4c0982c63568e479f59ec96a026f3edd5386dd3e2c64d0dc2beebd994
size 83216

acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90962630194b721962987c5630b5e43d719562e4aaadc38ffa35b3bffa47fc99
size 101637

acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/581f810d-d8d6-46a9-8afa-991db46cc0ac_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00d18868d8cf095e6b1c14572a665c9bdc09ac9184b0dbe87407caca73ab9b8b
size 1111926

acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/full.md
ADDED
@@ -0,0 +1,359 @@
# Accelerating SGD for Highly Ill-Conditioned Huge-Scale Online Matrix Completion

Gavin Zhang
University of Illinois at Urbana-Champaign
jialun2@illinois.edu

Hong-Ming Chiu
University of Illinois at Urbana-Champaign
hmchiu2@illinois.edu

Richard Y. Zhang
University of Illinois at Urbana-Champaign
ryz@illinois.edu

# Abstract

The matrix completion problem seeks to recover a $d \times d$ ground truth matrix of low rank $r \ll d$ from observations of its individual elements. Real-world matrix completion is often a huge-scale optimization problem, with $d$ so large that even the simplest full-dimension vector operations with $O(d)$ time complexity become prohibitively expensive. Stochastic gradient descent (SGD) is one of the few algorithms capable of solving matrix completion on a huge scale, and can also naturally handle streaming data over an evolving ground truth. Unfortunately, SGD experiences a dramatic slow-down when the underlying ground truth is ill-conditioned; it requires at least $O(\kappa \log(1/\epsilon))$ iterations to get $\epsilon$-close to a ground truth matrix with condition number $\kappa$. In this paper, we propose a preconditioned version of SGD that preserves all the favorable practical qualities of SGD for huge-scale online optimization while also making it agnostic to $\kappa$. For a symmetric ground truth and the Root Mean Square Error (RMSE) loss, we prove that the preconditioned SGD converges to $\epsilon$-accuracy in $O(\log(1/\epsilon))$ iterations, with a rapid linear convergence rate as if the ground truth were perfectly conditioned with $\kappa = 1$. In our experiments, we observe a similar acceleration for item-item collaborative filtering on the MovieLens25M dataset via a pair-wise ranking loss, with 100 million training pairs and 10 million testing pairs. [See supporting code at https://github.com/Hong-Ming/ScaledSGD.]
# 1 Introduction

The matrix completion problem seeks to recover an underlying $d \times d$ ground truth matrix $M$ of low rank $r \ll d$ from observations of its individual matrix elements $M_{i,j}$. The problem appears most prominently in the context of collaborative filtering and recommendation systems, but also in numerous other applications. In this paper, we focus on the symmetric and positive semidefinite variant of the problem, in which the underlying matrix $M$ can be factored as $M = ZZ^T$ where the factor matrix $Z$ is $d \times r$, though our methods have natural extensions to the nonsymmetric case. We note that the symmetric positive semidefinite variant is actually far more common in collaborative filtering, due to the prevalence of item-item models, which enjoy better data (most platforms contain several orders of magnitude more users than items) and more stable recommendations (the similarity between items tends to change slowly over time) than user-user and user-item models.

For the full-scale, online instances of matrix completion that arise in real-world collaborative filtering, stochastic gradient descent or SGD is the only viable algorithm for learning the underlying matrix $M$. The basic idea is to formulate a candidate matrix of the form $XX^T$ with respect to a learned factor matrix $X \in \mathbb{R}^{d \times r}$, and to minimize a cost function of the form $\phi(XX^T - M)$. Earlier work used the root mean square error (RMSE) loss $\|XX^T - M\|_F^2 = \sum_{i,j} (XX^T - M)_{i,j}^2$, though later work has focused on pairwise losses like the BPR [1], which optimize for ordering and therefore give better recommendations. For the RMSE loss, the corresponding SGD iterations with (rescaled) learning rate $\alpha > 0$ read

$$
x_{i,+} = x_i - \alpha \cdot \left(x_i^T x_j - M_{ij}\right) x_j, \quad x_{j,+} = x_j - \alpha \cdot \left(x_i^T x_j - M_{ij}\right) x_i, \tag{1}
$$

where $M_{ij}$ is the sampled $(i,j)$-th element of the ground truth matrix $M$, and $x_i, x_j$ and $x_{i,+}, x_{j,+}$ denote the $i$-th and $j$-th rows of the current iterate $X_t$ and new iterate $X_{t+1}$. Pairwise losses like the BPR can be shown to have a similar update equation over three rows of $X$ [1]. Given that only two or three rows of $X$ are accessed and updated at any time, SGD is readily amenable to massive parallelization and distributed computing. For very large values of $d$, the update equation (1) can be run by multiple workers in parallel without locks, with vanishing probability of collision [2]. The blocks of $X$ that are more frequently accessed together can be stored on the same node in a distributed memory system.
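To make the update concrete, here is a minimal NumPy sketch of the row-wise iteration (1); the function name and the in-place row updates are our own illustration, not the authors' reference implementation.

```python
import numpy as np

def sgd_step(X, i, j, M_ij, alpha):
    """One SGD step (1) on the RMSE loss: only rows i and j of X change."""
    xi, xj = X[i].copy(), X[j].copy()   # snapshot so both updates use the old rows
    residual = xi @ xj - M_ij           # (x_i^T x_j - M_ij)
    X[i] = xi - alpha * residual * xj
    X[j] = xj - alpha * residual * xi
    return X
```

Because each step reads and writes only two rows, independent workers that sample mostly disjoint index pairs can apply this update concurrently without coordination.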
Unfortunately, the convergence rate of SGD can sometimes be extremely slow. One possible explanation, as many recent authors have pointed out [3-6], is that matrix factorization models are very sensitive to ill-conditioning of the ground truth matrix $M$. The number of SGD iterations grows at least linearly with the condition number $\kappa$, which here is defined as the ratio between the largest and the $r$-th largest singular values of $M$. Ill-conditioning causes particular concern because most real-world data are ill-conditioned. In one widely cited study [7], it was found that the dominant singular value accounts for only $\approx 80\%$ of prediction accuracy, with the diversity of individual preferences making up the remaining, ill-conditioned singular values. Cloninger et al. [8] note that certain applications of matrix completion have condition numbers as high as $\kappa = 10^{15}$.
This paper is inspired by a recent full-batch gradient method called ScaledGD [4, 9] and a closely related algorithm PrecGD [5], in which gradient descent is made immune to ill-conditioning in the ground truth by right-rescaling the full-batch gradient by the matrix $(X^T X)^{-1}$. Applying this same strategy to the SGD update equation (1) yields the row-wise updates

$$
x_{i,+} = x_i - \alpha \cdot \left(x_i^T x_j - M_{ij}\right) P x_j, \quad x_{j,+} = x_j - \alpha \cdot \left(x_i^T x_j - M_{ij}\right) P x_i, \tag{2a}
$$

in which we precompute and cache the preconditioner $P = (X^T X)^{-1}$ ahead of time$^1$, and update it after the iteration as

$$
P_+ = \left(P^{-1} + x_{i,+} x_{i,+}^T + x_{j,+} x_{j,+}^T - x_i x_i^T - x_j x_j^T\right)^{-1} \tag{2b}
$$

by making four calls to the Sherman-Morrison rank-1 update formula

$$
\left(P^{-1} + u u^T\right)^{-1} = P - \frac{P u u^T P}{1 + u^T P u}, \qquad \left(P^{-1} - u u^T\right)^{-1} = P + \frac{P u u^T P}{1 - u^T P u}.
$$

This way, the rescaled update equations use just $O(r^2)$ arithmetic operations, which for modest values of $r$ is only marginally more than the $O(r)$ cost of the unscaled update equations (1). Indeed, the nearest-neighbor algorithms inside most collaborative filters have exponential complexity with respect to the latent dimensionality $r$, and so are often implemented with $r$ small enough for (1) and (2) to have essentially the same runtime. Here, we observe that the rescaled update equations (2) preserve essentially all of the practical advantages of SGD for huge-scale, online optimization: they can also be run by multiple workers in parallel without locks, and they can also be easily implemented over distributed memory. The only minor difference is that separate copies of $P$ should be maintained by each worker, and resynchronized once differences grow large.
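The following sketch implements the rescaled step (2a) together with the cached-preconditioner refresh (2b). The helper names are ours, and a practical implementation would guard against a temporarily singular $P^{-1}$ when downdating.

```python
import numpy as np

def sherman_morrison(P, u, sign):
    """Return (P^{-1} + sign * u u^T)^{-1} from P in O(r^2) time."""
    Pu = P @ u
    return P - sign * np.outer(Pu, Pu) / (1.0 + sign * (u @ Pu))

def scaled_sgd_step(X, P, i, j, M_ij, alpha):
    """One ScaledSGD step (2) with P = (X^T X)^{-1} kept up to date."""
    xi, xj = X[i].copy(), X[j].copy()
    residual = xi @ xj - M_ij
    X[i] = xi - alpha * residual * (P @ xj)
    X[j] = xj - alpha * residual * (P @ xi)
    # refresh P^{-1} = X^T X: remove the old rows, then add the new ones (2b)
    for u, s in ((xi, -1.0), (xj, -1.0), (X[i], 1.0), (X[j], 1.0)):
        P = sherman_morrison(P, u, s)
    return X, P
```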
Contributions In this paper, we provide a rigorous proof that the rescaled update equations (2), which we name ScaledSGD, become immune to the effects of ill-conditioning in the underlying ground truth matrix. For symmetric matrix completion under the root mean squared error (RMSE) loss function, regular SGD is known to have an iteration count of $O(\kappa^4 \cdot dr \log(d / \epsilon))$ within a local neighborhood of the ground truth [10]. This figure is optimal in the dimension $d$, the rank $r$, and the final accuracy $\epsilon$, but suboptimal by four exponents with respect to the condition number $\kappa$. In contrast, we prove for the same setting that ScaledSGD attains an optimal convergence rate, converging to $\epsilon$-accuracy in $O(dr\log(d / \epsilon))$ iterations for all values of the condition number $\kappa$. In fact, our theoretical result predicts that ScaledSGD converges as if the ground truth matrix were perfectly conditioned, with a condition number of $\kappa = 1$.
At first sight, it appears quite natural that applying the ScaledGD preconditioner to SGD should result in accelerated convergence. However, the core challenge of stochastic algorithms like SGD is that each iteration can have substantial variance that "drowns out" the expected progress made in the iteration. In the case of ScaledSGD, a rough analysis would suggest that the highly ill-conditioned preconditioner should improve convergence in expectation, but at the cost of dramatically worsening the variance.

Surprisingly, we find in this paper that the specific scaling $(X^T X)^{-1}$ used in ScaledSGD not only does not worsen the variance, but in fact improves it. Our key insight and main theoretical contribution is Lemma 4, which shows that the same mechanism that allows ScaledGD to converge faster (compared to regular GD) also allows ScaledSGD to enjoy reduced variance (compared to regular SGD). In fact, it is this effect of variance reduction that is responsible for most ($\kappa^3$ out of $\kappa^4$) of our improvement over the previous state of the art. It turns out that a careful choice of preconditioner can be used as a mechanism for variance reduction, while at the same time also fulfilling its usual, classical purpose, which is to accelerate convergence in expectation.
Related work Earlier work on matrix completion analyzed a convex relaxation of the original problem, showing that nuclear norm minimization can recover the ground truth from a few incoherent measurements [11-15]. This approach enjoys a near-optimal sample complexity but incurs an $O(d^3)$ per-iteration computational cost, which is prohibitive for an even moderately large $d$. More recent work has focused on a nonconvex formulation based on Burer and Monteiro [16], which factors the optimization variable as $M = XX^T$ where $X \in \mathbb{R}^{d \times r}$, and applies a local search method such as alternating minimization [17-20], projected gradient descent [21, 22], or regular gradient descent [23-26]. A separate line of work [27-35] focused on global properties of nonconvex matrix recovery problems, showing that the problem has no spurious local minima if the sampling operator satisfies certain regularity conditions such as incoherence or restricted isometry.

The convergence rate of SGD has been well-studied for general classes of functions [36-39]. For matrix completion in particular, Jin et al. [10] proved that SGD converges towards an $\epsilon$-accurate solution in $O(d\kappa^4\log(1 / \epsilon))$ iterations, where $\kappa$ is the condition number of $M$. Unfortunately, this quartic dependence on $\kappa$ makes SGD extremely slow and impractical for huge-scale applications.

This dramatic slow-down of gradient descent and its variants caused by ill-conditioning has become well known in recent years. Several recent papers have proposed full-batch algorithms to overcome this issue [9, 40, 41], but these methods cannot be used in the huge-scale optimization setting, where $d$ is so large that even full-vector operations with $O(d)$ time complexity are too expensive. As a deterministic full-batch method, ScaledGD [9] requires a projection onto the set of incoherent matrices at every iteration in order to maintain rapid convergence. Instead, our key finding here is that the stochasticity of SGD alone is enough to keep the iterates as incoherent as the ground truth, which allows rapid progress to be made. The second-order method proposed in [41] costs at least $O(d)$ per iteration and has no straightforward stochastic analog. PrecGD [5] applies only to problems satisfying the restricted isometry property, which does not hold for matrix completion.
# 2 Background: Linear convergence of SGD

In our theoretical analysis, we restrict our attention to symmetric matrix completion under the root mean squared error (RMSE) loss function. Our goal is to solve the following nonconvex optimization problem

$$
\min_{X \in \mathbb{R}^{d \times r}} f(X) \stackrel{\text{def}}{=} \|XX^T - ZZ^T\|_F^2 \quad \text{where } Z = \left[z_1, z_2, \dots, z_d\right]^T \in \mathbb{R}^{d \times r} \tag{3}
$$

in which we assume that the $d \times d$ ground truth matrix $ZZ^T \succeq 0$ is exactly rank-$r$, with a finite condition number

$$
\kappa \stackrel{\text{def}}{=} \lambda_{\max}\left(ZZ^T\right) / \lambda_r\left(ZZ^T\right) = \lambda_{\max}\left(Z^T Z\right) / \lambda_{\min}\left(Z^T Z\right) < \infty. \tag{4}
$$

In order to be able to reconstruct $ZZ^T$ from a small number of measurements, we will also need to assume that the ground truth has small coherence [42]

$$
\mu \stackrel{\text{def}}{=} \frac{d}{r} \cdot \max_{1 \leq i \leq d} \|e_i^T Z (Z^T Z)^{-1/2}\|^2. \tag{5}
$$
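As a concrete reference for these two quantities, the snippet below computes $\kappa$ from (4) and $\mu$ from (5) for a given factor $Z$; it is a straightforward transcription of the definitions, not code from the paper.

```python
import numpy as np

def kappa(Z):
    """Condition number (4): lambda_max(Z^T Z) / lambda_min(Z^T Z)."""
    eigs = np.linalg.eigvalsh(Z.T @ Z)
    return eigs[-1] / eigs[0]

def coherence(Z):
    """Coherence (5): (d/r) * max_i ||e_i^T Z (Z^T Z)^{-1/2}||^2."""
    d, r = Z.shape
    w, V = np.linalg.eigh(Z.T @ Z)
    inv_sqrt = V @ np.diag(w ** -0.5) @ V.T         # (Z^T Z)^{-1/2}
    W = Z @ inv_sqrt
    return (d / r) * np.max(np.sum(W * W, axis=1))  # max row leverage score
```

For example, a dense orthonormal $Z$ whose rows all have magnitude $1/\sqrt{d}$ returns $\mu \approx 1$, while a highly concentrated $Z$ approaches the maximum $d/r$.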
Recall that $\mu$ takes on a value from 1 to $d / r$, with the smallest achieved by dense, orthonormal choices of $Z$ whose rows all have magnitudes of $1 / \sqrt{d}$, and the largest achieved by a ground truth $ZZ^T$ containing a single nonzero element. Assuming incoherence $\mu = O(1)$ with respect to $d$, it is a well-known result that all $d^2$ matrix elements of $ZZ^T$ can be perfectly reconstructed from just $O(dr\log d)$ random samples of its matrix elements [12, 43].
This paper considers solving (3) in the huge-scale, online optimization setting, in which individual matrix elements of the ground truth $(ZZ^T)_{i,j} = z_i^T z_j$ are revealed one at a time, uniformly at random with replacement, and a current iterate $X$ is continuously updated from the streaming data. We note that this is a reasonably accurate model for how recommendation engines are tuned to user preferences in practice, although the uniformity of the random sampling is admittedly an assumption made to ease theoretical analysis. Define the stochastic gradient operator as

$$
SG(X) = 2d^2 \cdot (x_i^T x_j - z_i^T z_j)(e_i x_j^T + e_j x_i^T) \quad \text{where } (i, j) \sim \mathrm{Unif}([d] \times [d]),
$$

where $x_i, x_j \in \mathbb{R}^r$ are the $i$-th and $j$-th rows of $X$, and the scaling $d^2$ is chosen so that, over the randomness of the sampled index $(i,j)$, we have exactly $\mathbb{E}[SG(X)] = \nabla f(X)$. Then, the classical online SGD algorithm can be written as

$$
X_{t+1} = X_t - \alpha\, SG(X_t) \quad \text{where } \alpha > 0. \tag{SGD}
$$
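A quick Monte-Carlo sanity check of the unbiasedness claim $\mathbb{E}[SG(X)] = \nabla f(X)$; the sampler below is a direct transcription of the definition above, and the sample count and tolerance are illustrative.

```python
import numpy as np

def SG(X, Z, rng):
    """One draw of the stochastic gradient: (i, j) ~ Unif([d] x [d])."""
    d, r = X.shape
    i, j = rng.integers(d), rng.integers(d)
    resid = X[i] @ X[j] - Z[i] @ Z[j]      # (XX^T - ZZ^T)_{ij}
    G = np.zeros((d, r))
    G[i] += 2 * d**2 * resid * X[j]        # e_i x_j^T term
    G[j] += 2 * d**2 * resid * X[i]        # e_j x_i^T term
    return G

rng = np.random.default_rng(0)
d, r = 20, 3
Z = rng.standard_normal((d, r))
X = rng.standard_normal((d, r))
avg = sum(SG(X, Z, rng) for _ in range(200_000)) / 200_000
full = 4 * (X @ X.T - Z @ Z.T) @ X         # grad f(X)
print(np.linalg.norm(avg - full) / np.linalg.norm(full))  # small, shrinks like 1/sqrt(samples)
```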
Here, we observe that a single iteration of SGD coincides with full-batch gradient descent in expectation, as in $\mathbb{E}[X_{t+1}|X_t] = X_t - \alpha \nabla f(X_t)$. Therefore, assuming bounded deviations and bounded variances, it follows from standard arguments that the behavior of many iterations of SGD should concentrate about that of full-batch gradient descent $X_{t+1} = X_t - \alpha \nabla f(X_t)$.

Within a region sufficiently close to the ground truth, full-batch gradient descent is well known to converge at a linear rate to the ground truth [23, 44]. Within this same region, Jin et al. [10] proved that SGD also converges linearly. For an incoherent ground truth with $\mu = O(1)$, they proved that SGD with an aggressive choice of step-size is able to recover the ground truth to $\epsilon$-accuracy in $O(\kappa^4 dr\log(d / \epsilon))$ iterations, with each iteration costing $O(r)$ arithmetic operations and selecting 1 random sample. This iteration count is optimal with respect to $d$, $r$, and $1 / \epsilon$, although its dependence on $\kappa$ is a cubic factor (i.e., a factor of $\kappa^3$) worse than full-batch gradient descent's figure of $O(\kappa \log(1 / \epsilon))$, which is itself already quite bad, given that $\kappa$ in practice can readily take on values of $10^3$ to $10^6$.
Theorem 1 (Jin, Kakade, and Netrapalli [10]). For $Z \in \mathbb{R}^{d \times r}$ with $\sigma_{\max}(Z) = 1$ and $f(X) = \|XX^T - ZZ^T\|_F^2$ and $h_i(X) = \|e_i^T X\|^2$, define the following

$$
f_{\max} \stackrel{\text{def}}{=} \left(\frac{1}{10\kappa}\right)^2, \qquad h_{\max} \stackrel{\text{def}}{=} 20 \cdot \kappa^2 \cdot \frac{\mu r}{d}.
$$

For an initial point $X_0 \in \mathbb{R}^{d \times r}$ that satisfies $f(X_0) \leq \frac{1}{2} f_{\max}$ and $\max_i h_i(X_0) \leq \frac{1}{2} h_{\max}$, there exists some constant $c$ such that for any learning rate $\alpha < c \cdot (\kappa \cdot h_{\max} \cdot d^2 \log d)^{-1}$, with probability at least $1 - T / d^{10}$, we will have for all $t \leq T$ iterations of SGD that

$$
f(X_t) \leq \left(1 - \frac{\alpha}{2\kappa}\right)^t \cdot f_{\max}, \quad \max_i h_i(X_t) \leq h_{\max}.
$$
The reason for Theorem 1's additional $\kappa^3$ dependence beyond full-batch gradient descent is its need to maintain incoherence in its iterates. Using standard techniques on martingale concentration, one can readily show that SGD replicates a single iteration of full-batch gradient descent over an epoch of $d^2$ iterations. This results in an iteration count of $O(\kappa \cdot d^2\log(1 / \epsilon))$ with an optimal dependence on $\kappa$, but the entire matrix is already fully observed after collecting $d^2$ samples. Instead, Jin et al. [10] noted that the variance of SGD iterations is controlled by the step-size $\alpha$ times the maximum coherence $\mu_X = \frac{d}{r}\cdot \max_{i,t}\|e_i^T X_t\|^2$ over the iterates $X_t, X_{t-1}, \ldots, X_0$. If the iterates can be kept incoherent with $\mu_X = O(1)$, then SGD with a more aggressive step-size will reproduce an iteration of full-batch gradient descent after an epoch of just $O(dr\log d)$ iterations.

The main finding in Jin et al. [10]'s proof of Theorem 1 is that the stochasticity of SGD is enough to keep the iterates incoherent. This contrasts with full-batch methods at the time, which required an added regularizer [20, 30, 45] or an explicit projection step [9]. (As pointed out by a reviewer, it was later shown by Ma et al. [46] that full-batch gradient descent is also able to maintain incoherence without a regularizer or a projection.) Unfortunately, maintaining incoherence requires shrinking the step-size by a factor of $\kappa$, and the actual value of $\mu_X$ that results is also a factor of $\kappa^2$ worse than the original coherence $\mu$ of the ground truth $Z$. The resulting iteration count $O(\kappa^4 \cdot dr \log(d / \epsilon))$ is made optimal with respect to $d$, $r$, and $1 / \epsilon$, but only at the cost of worsening the dependence on the condition number $\kappa$ by another three exponents.

Finally, the quality of the initial point $X_0$ also has a dependence on the condition number $\kappa$. In order to guarantee linear convergence, Theorem 1 requires $X_0$ to lie in the neighborhood $\|X_0X_0^T - ZZ^T\|_F < \lambda_{\min}(Z^T Z) = O(\kappa^{-1})$. This dependence on $\kappa$ is optimal, because full-batch gradient descent must lose its ability to converge linearly in the limit $\kappa \to \infty$ [5, 6]. However, the leading constant can be very pessimistic, because the theorem must formally exclude spurious critical points $X_{\mathrm{spur}}$ that have $\nabla f(X_{\mathrm{spur}}) = 0$ but $f(X_{\mathrm{spur}}) > 0$ in order to be provably correct. In practice, it is commonly observed that SGD converges globally, starting from an arbitrary, possibly random initialization [30], at a linear rate that is consistent with local convergence theorems like Theorem 1. It is now commonly argued that gradient methods can escape saddle points with high probability [47], and so their performance is primarily dictated by local convergence behavior [48, 49].
# 3 Proposed algorithm and main result

Inspired by a recent full-batch gradient method called ScaledGD [4, 9] and a closely related algorithm PrecGD [5], we propose the following algorithm

$$
X_{t+1} = X_t - \alpha\, SG(X_t)\left(X_t^T X_t\right)^{-1} \quad \text{where } \alpha > 0. \tag{ScaledSGD}
$$
As we mentioned in the introduction, the preconditioner $P = (X^T X)^{-1}$ can be precomputed and cached in a practical implementation, and afterwards efficiently updated using the Sherman-Morrison formula. The per-iteration cost of ScaledSGD is $O(r^2)$ arithmetic operations and 1 random sample, which for modest values of $r$ is only marginally more than the cost of SGD.

Our main result in this paper is that, within a region sufficiently close to the ground truth, this simple rescaling allows ScaledSGD to converge linearly to $\epsilon$-accuracy in $O(dr\log(d / \epsilon))$ iterations, with no further dependence on the condition number $\kappa$. This iteration count is optimal with respect to $d$, $r$, $1 / \epsilon$, and $\kappa$, and in fact matches SGD with a perfectly conditioned ground truth $\kappa = 1$. In our numerical experiments, we observe that ScaledSGD converges globally from a random initialization at the same rate as SGD, as if $\kappa = 1$.
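Putting the pieces together, a hedged end-to-end sketch of the (ScaledSGD) iteration follows; for clarity it recomputes $P = (X^T X)^{-1}$ at every step, whereas a practical huge-scale implementation would maintain $P$ with the $O(r^2)$ Sherman-Morrison updates (2b).

```python
import numpy as np

def scaled_sgd(Z, r, alpha=0.3, num_epochs=50, seed=0):
    """Minimal ScaledSGD loop for f(X) = ||XX^T - ZZ^T||_F^2 (sketch)."""
    rng = np.random.default_rng(seed)
    d = Z.shape[0]
    X = 0.1 * rng.standard_normal((d, r))      # random initialization
    for _ in range(num_epochs * d * d):        # one epoch = d^2 samples
        P = np.linalg.inv(X.T @ X)             # O(r^3); fine for small r in a sketch
        i, j = rng.integers(d), rng.integers(d)
        xi, xj = X[i].copy(), X[j].copy()
        resid = xi @ xj - Z[i] @ Z[j]          # observed entry (ZZ^T)_{ij}
        X[i] = xi - alpha * resid * (P @ xj)
        X[j] = xj - alpha * resid * (P @ xi)
    return X
```

Dropping the two `P @` products recovers plain SGD, which makes side-by-side comparisons such as those in Section 5 easy to reproduce.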
Theorem 2 (Main). For $Z \in \mathbb{R}^{d \times r}$ with $\sigma_{\max}(Z) = 1$ and $f(X) = \|XX^T - ZZ^T\|_F^2$ and $g_i(X) = e_i^T X (X^T X)^{-1} X^T e_i$, select a radius $\rho < 1/2$ and set

$$
f_{\max} \stackrel{\text{def}}{=} \left(\frac{\rho}{\kappa}\right)^2, \qquad g_{\max} \stackrel{\text{def}}{=} \frac{2^4}{(1 - 2\rho)^2} \cdot \frac{\mu r}{d}.
$$

For an initial point $X_0 \in \mathbb{R}^{d \times r}$ that satisfies $f(X_0) \leq \frac{1}{2} f_{\max}$ and $\max_i g_i(X_0) \leq \frac{1}{2} g_{\max}$, there exists some constant $c$ such that for any learning rate $\alpha < c \cdot [(g_{\max} + \rho) \cdot d^2 \log d]^{-1}$, with probability at least $1 - T / d^{10}$, we will have for all $t \leq T$ iterations of ScaledSGD that:

$$
f(X_t) \leq \left(1 - \frac{\alpha}{2}\right)^t \cdot f_{\max}, \quad \max_i g_i(X_t) \leq g_{\max}.
$$
Theorem 2 eliminates all dependencies on the condition number $\kappa$ in Theorem 1 except for the quality of the initial point, which we had already noted earlier as being optimal. Our main finding is that it is possible to maintain incoherence while taking aggressive step-sizes towards a highly ill-conditioned ground truth $ZZ^T$. In fact, Theorem 2 says that, with high probability, the maximum coherence $\mu_X$ of any iterate $X_t$ will only be a mild constant factor of $\approx 16$ times worse than the coherence $\mu$ of the ground truth $ZZ^T$. This is particularly surprising in view of the fact that every iteration of ScaledSGD involves inverting a potentially highly ill-conditioned matrix $(X^T X)^{-1}$. In contrast, even without inverting matrices, Theorem 1 says that SGD is only able to keep $\mu_X$ within a factor of $\kappa^2$ of $\mu$, and only by shrinking the step-size $\alpha$ by another factor of $\kappa$.

However, the price we pay for maintaining incoherence is that the quality of the initial point $X_0$ now gains a dependence on the dimension $d$, in addition to the condition number $\kappa$. In order to guarantee fast linear convergence independent of $\kappa$, Theorem 2 requires $X_0$ to lie in the neighborhood $\|X_0X_0^T - ZZ^T\|_F < \mu r\lambda_{\min}(Z^T Z) / d = (\kappa d)^{-1}$, so that $\rho$ can be set to be of the same order of magnitude as $g_{\max}$. In essence, the "effective" condition number of the ground truth has been worsened by another factor of $d$. This shrinks the size of our local neighborhood by a factor of $d$, but has no impact on the convergence rate of the resulting iterations.

In the limit that $\kappa \to \infty$ and the search rank $r$ becomes overparameterized with respect to the true rank $r^{\star} < r$ of $ZZ^T$, both full-batch gradient descent and SGD slow down to a sublinear convergence rate, in theory and in practice [5, 6]. While Theorem 2 is no longer applicable, we observe in our numerical experiments that ScaledSGD nevertheless maintains its fast linear convergence rate as if $\kappa = 1$. Following PrecGD [5], we believe that introducing a small identity perturbation to the scaling matrix of ScaledSGD, as in $(X^T X + \eta I)^{-1}$ for some $\eta \approx \sqrt{f(X)}$, should be enough to rigorously extend Theorem 2 to the overparameterized regime. We leave this extension as future work.
# 4 Key ideas for the proof

We begin by explaining the mechanism by which SGD slows down when converging towards an ill-conditioned ground truth. Recall that

$$
\mathbb{E}[SG(X)] = \mathbb{E}\left[2d^2 \cdot (XX^T - ZZ^T)_{i,j} \cdot (e_i e_j^T + e_j e_i^T) X\right] = 4(XX^T - ZZ^T)X = \nabla f(X).
$$

As $XX^T$ converges towards an ill-conditioned ground truth $ZZ^T$, the factor matrix $X$ must become progressively ill-conditioned, with

$$
\lambda_{\min}\left(X^T X\right) = \lambda_r\left(XX^T\right) \leq \lambda_r\left(ZZ^T\right) + \left\|XX^T - ZZ^T\right\|_F \leq \frac{1 + \rho}{\kappa}.
$$
Therefore, it is possible for components of the error vector $XX^T - ZZ^T$ to become "invisible" by aligning within the ill-conditioned subspaces of $X$. As SGD progresses towards the solution, these ill-conditioned subspaces of $X$ become the slowest components of the error vector to converge to zero. On the other hand, the maximum step-size that can be taken is controlled by the most well-conditioned subspaces of $X$. A simple idea, therefore, is to rescale the ill-conditioned components of the gradient $\nabla f(X)$ in order to make the ill-conditioned subspaces of $X$ more "visible".
More concretely, define the local norm of the gradient as $\|\nabla f(X)\|_X = \|\nabla f(X)(X^T X)^{1/2}\|_F$ and its corresponding dual norm as $\|\nabla f(X)\|_X^* = \|\nabla f(X)(X^T X)^{-1/2}\|_F$. It has long been known (see e.g. [23, 44]) that rescaling the gradient yields

$$
\|\nabla f(X)\|_X^* \stackrel{\text{def}}{=} \|4(XX^T - ZZ^T)X(X^T X)^{-1/2}\|_F = 4\cos\theta \cdot \|XX^T - ZZ^T\|_F,
$$

where $\theta$ is the angle between the error vector $XX^T - ZZ^T$ and the linear subspace $\{XY^T + YX^T : Y \in \mathbb{R}^{d \times r}\}$. This insight immediately suggests an iteration like $X_+ = X - \alpha \nabla f(X)(X^T X)^{-1}$. In fact, the gradients of $f$ have some Lipschitz constant $L$, so

$$
\begin{aligned} f(X_+) &\leq f(X) - \alpha \langle \nabla f(X), \nabla f(X)(X^T X)^{-1} \rangle + \frac{L}{2}\alpha^2 \|\nabla f(X)(X^T X)^{-1}\|_F^2, \\ &\leq f(X) - \alpha \left(\|\nabla f(X)\|_X^*\right)^2 + \frac{L_X}{2}\alpha^2 \left(\|\nabla f(X)\|_X^*\right)^2, \\ &\leq \left[1 - \alpha \cdot 8\cos^2\theta\right] f(X) \quad \text{for } \alpha \leq 1/L_X. \end{aligned}
$$

However, a naive analysis finds that $L_X = L / \lambda_{\min}(X^T X) \approx L \cdot \kappa$, and this causes the step-size to shrink by a factor of $\kappa$. The main motivating insight behind ScaledGD [4, 9] and later PrecGD [5] is that, with a finer analysis, it is possible to prove Lipschitz continuity under a local change of norm.
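The $4\cos\theta$ identity and the "invisible error" mechanism are easy to see numerically. In the sketch below (our own construction, not from the paper), the error lives entirely in the weak subspace of an ill-conditioned $X$, so the raw gradient is orders of magnitude smaller than the error, while the locally rescaled norm recovers $4\|XX^T - ZZ^T\|_F$ exactly (here $\cos\theta = 1$).

```python
import numpy as np

rng = np.random.default_rng(1)
Q, _ = np.linalg.qr(rng.standard_normal((50, 2)))
X = Q @ np.diag([1.0, 1e-3])              # ill-conditioned factor
Z = Q @ np.diag([1.0, 2e-3])              # error confined to the weak column

E = X @ X.T - Z @ Z.T
grad = 4 * E @ X                          # nabla f(X)
w, V = np.linalg.eigh(X.T @ X)
inv_sqrt = V @ np.diag(w ** -0.5) @ V.T   # (X^T X)^{-1/2}

print(np.linalg.norm(grad))               # ~1.2e-8: the error is nearly "invisible"
print(np.linalg.norm(grad @ inv_sqrt))    # ~1.2e-5, matching the line below
print(4 * np.linalg.norm(E))              # 4 * ||E||_F ~ 1.2e-5
```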
Lemma 3 (Function descent). Let $X, Z \in \mathbb{R}^{d \times r}$ satisfy $\|XX^T - ZZ^T\|_F \leq \rho \lambda_{\min}(Z^T Z)$ where $\rho < 1/2$. Then, the function $f(X) = \|XX^T - ZZ^T\|_F^2$ satisfies

$$
f(X + V) \leq f(X) + \langle \nabla f(X), V \rangle + \frac{L_X}{2}\|V\|_X^2, \quad \left(\|\nabla f(X)\|_X^*\right)^2 \geq 13 \cdot f(X)
$$

for all $\|V\|_X \leq C \cdot \sqrt{f(X)}$, with $L_X = 6 + 8C + 2C^2 = O(1 + C^2)$.
This same idea can be "stochastified" in a straightforward manner. Conditioning on the current iterate $X$, the new iterate $X_+ = X - \alpha SG(X)(X^T X)^{-1}$ has expectation

$$
\mathbb{E}[f(X_+)] \leq f(X) - \alpha \langle \nabla f(X), \mathbb{E}[SG(X)(X^T X)^{-1}] \rangle + \alpha^2 \frac{L_X}{2} \mathbb{E}[(\|SG(X)\|_X^*)^2].
$$

The linear term evaluates as $\mathbb{E}[SG(X)(X^T X)^{-1}] = \nabla f(X)(X^T X)^{-1}$, while the quadratic term is

$$
\mathbb{E}[(\|SG(X)\|_X^*)^2] \leq \sum_{i,j} 4d^2 \cdot (XX^T - ZZ^T)_{i,j}^2 \cdot 4\max_i (\|e_i^T X\|_X^*)^2 = 16 \cdot f(X) \cdot \max_i g_i(X),
$$

where $g_i(X) = e_i^T X (X^T X)^{-1} X^T e_i = (\|e_i^T X\|_X^*)^2$. Combined, we obtain geometric convergence

$$
\mathbb{E}[f(X_+)] \leq (1 - \alpha \cdot 8\cos^2\theta) f(X) \quad \text{for } \alpha = O\left(g_{\max}^{-1} \cdot d^{-2}\right). \tag{6}
$$

We see that the step-size depends crucially on the incoherence $g_i(X) \leq g_{\max}$ of the current iterate. If the current iterate $X$ is incoherent with $g_{\max} = O(1/d)$, then a step-size of $\alpha = O(1/d)$ is possible, resulting in convergence in $O(dr\log(d / \epsilon))$ iterations, which can be shown using standard martingale techniques [10]. But if the current iterate has $g_{\max} = O(1)$, then only a step-size of $\alpha = O(1/d^2)$ is possible, which forces us to compute $d^2$ iterations, thereby obviating the need to complete the matrix in the first place.
Therefore, in order to prove rapid linear convergence, we need to additionally show that, with high probability, the coherence $g_k(X) = (\|e_k^T X\|_X^*)^2$ remains $O(1)$ throughout the ScaledSGD iterations. This is the most challenging part of our proof. Previous methods that applied a similar scaling to full-batch GD [9] required an explicit projection onto the set of incoherent matrices at each iteration. Applying a similar projection to ScaledSGD would take $O(d)$ time, which destroys the scalability of our method. On the other hand, Jin et al. [10] showed that the randomness in SGD is enough to keep the coherence of the iterates within a factor of $\kappa^2$ of the coherence of the ground truth, and only with a step-size of at most $\alpha = O(\kappa^{-1})$.

Surprisingly, here we show that the randomness in ScaledSGD is enough to keep the coherence of the iterates within a constant factor of the coherence of the ground truth, using a step-size with no dependence on $\kappa$. The following key lemma is the crucial insight of our proof. First, it says that the function $g_k(X)$ satisfies a "descent lemma" with respect to the local norm $\|\cdot\|_X^*$. Second, and much more importantly, it says that descending $g_k(X)$ along the scaled gradient direction $\nabla f(X)(X^T X)^{-1}$ incurs a linear decrement $\frac{1-2\rho}{1-\rho} g_k(X)$ with no dependence on the condition number $\kappa$. This is in direct analogy to the function value decrement in (6), which has no dependence on $\kappa$, and in direct contrast to the proof of Jin et al. [10], which is only able to achieve a decrement of $(8/\kappa) g_k(X)$ due to the lack of rescaling by $(X^T X)^{-1}$.
Lemma 4 (Coherence descent). Let $g_k(X) = e_k^T X (X^T X)^{-1} X^T e_k$. Under the same conditions as Lemma 3, we have

$$
g_k(X + V) \leq g_k(X) + \langle V, \nabla g_k(X) \rangle + \frac{5(\|V\|_X^*)^2}{1 - 2\|V\|_X^*},
$$

$$
\langle \nabla g_k(X), \nabla f(X)(X^T X)^{-1} \rangle \geq \left[\frac{1-2\rho}{1-\rho} g_k(X) - \frac{1}{1-\rho}\sqrt{g_k(X)\, g_k(Z)}\right].
$$
Conditioning on $X$, we have for the search direction $V = SG(X)(X^T X)^{-1}$ and $X_+ = X + V$

$$
\begin{aligned} \mathbb{E}\left[g_k(X_+)\right] &\leq g_k(X) - \alpha \langle \nabla g_k(X), \mathbb{E}[V] \rangle + \alpha^2 \cdot \mathbb{E}\left[\frac{(\|V\|_X^*)^2}{1 - 2\|V\|_X^*}\right] \\ &\leq \left(1 - \frac{1-2\rho}{1-\rho}\alpha\right) g_k(X) + \alpha \cdot \frac{1}{1-\rho} \cdot \sqrt{g_k(X)\, g_k(Z)} + \alpha^2 \cdot \frac{\mathbb{E}\left[(\|V\|_X^*)^2\right]}{1 - 2\|V\|_X^*} \\ &\leq \left(1 - \frac{1-2\rho}{1-\rho}\alpha\right) g_k(X) + \alpha \cdot \frac{\sqrt{\mu / g_{\max}}}{1-\rho} \cdot g_{\max} + \alpha^2 \cdot \frac{O(d^2 \cdot g_{\max} \cdot \rho^2)}{1 - O(g_{\max}^{1/2} \cdot \rho)} \\ &\leq \left(1 - \zeta\alpha\right) g_k(X) + \alpha \cdot \frac{\zeta}{2} g_{\max} \quad \text{for } \alpha = O\left(\rho^{-1} d^{-2}\right). \end{aligned} \tag{7}
$$

It then follows that $g_k(X_+)$ converges geometrically towards $\frac{1}{2} g_{\max}$ in expectation, with a convergence rate $(1 - \zeta\alpha)$ that is independent of the condition number $\kappa$:

$$
\mathbb{E}\big[g_k(X_+) - \tfrac{1}{2} g_{\max}\big] \leq \left[(1 - \zeta\alpha) g_k(X) + \alpha \cdot \tfrac{\zeta}{2} g_{\max}\right] - \tfrac{1}{2} g_{\max} \leq (1 - \zeta\alpha)\left[g_k(X) - \tfrac{1}{2} g_{\max}\right].
$$

The proof of Theorem 2 then follows from standard techniques, by turning the two decrement conditions (6) and (7) into supermartingales and applying a standard concentration inequality. We defer the rigorous proof to Appendix E.
# 5 Experimental validation

In this section we compare the practical performance of ScaledSGD and SGD on the RMSE loss function in Theorem 2 and on two real-world loss functions: the pairwise RMSE loss used to complete Euclidean distance matrices (EDMs) in wireless communication networks, and the Bayesian Personalized Ranking (BPR) loss used to generate personalized item recommendations in collaborative filtering. In each case, ScaledSGD remains highly efficient since it only updates two or three rows at a time, and the preconditioner $P$ can be computed through low-rank updates, for a per-iteration cost of $O(r^2)$. All of our experiments use random Gaussian initializations and an initial $P = \sigma^2 I$. To be able to accurately measure and report the effects of ill-conditioning on ScaledSGD and SGD, we focus on small-scale synthetic datasets in the first two experiments, for which the ground truth is explicitly known and the condition numbers can be finely controlled. In addition, to gauge the scalability of ScaledSGD on huge-scale real-world datasets, in the third experiment we apply ScaledSGD to generate personalized item recommendations using the MovieLens25M dataset [50], for which the underlying item-item matrix has more than 62,000 items and 100 million pairwise samples are used during training. (Due to space constraints, we defer the details of the experimental setup, mathematical formulations, and the actual update equations to Appendix A.) The code for all experiments is available at https://github.com/Hong-Ming/ScaledSGD.
Matrix completion with RMSE loss. The problem formulation is discussed in Section 3. Figure 1 plots the error $f(X) = \|XX^T - M\|_F^2$ as the number of epochs increases. As expected, in the well-conditioned case, both ScaledSGD and SGD converge to machine error at roughly the same linear rate. However, in the ill-conditioned case, SGD slows down significantly while ScaledSGD converges at almost exactly the same rate as in the well-conditioned case.

Figure 1: Matrix Completion with RMSE loss. We compare the convergence rate of ScaledSGD and SGD for a well-conditioned and an ill-conditioned ground truth matrix of size $30 \times 30$ and rank 3. (Left) Well-conditioned $M$, $\kappa(M) = 1$. Step-size $\alpha = 0.3$. Both ScaledSGD and SGD converge quickly to the ground truth. (Right) Ill-conditioned $M$, $\kappa(M) = 10^4$. Step-size $\alpha = 0.3$. SGD stagnates while ScaledSGD retains the same convergence rate as in the well-conditioned case.
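A hedged sketch of the Figure 1 setup: generate a rank-3, $30 \times 30$ ground truth with a prescribed condition number and feed it to the SGD/ScaledSGD sketches above. The construction of the spectrum is our own; the paper's exact data generation may differ.

```python
import numpy as np

def make_ground_truth(d=30, r=3, kappa=1e4, seed=0):
    """Random rank-r factor Z with cond(Z Z^T) = kappa (sketch)."""
    rng = np.random.default_rng(seed)
    Q, _ = np.linalg.qr(rng.standard_normal((d, r)))       # orthonormal columns
    sigmas = np.logspace(0.0, -0.5 * np.log10(kappa), r)   # singular values of Z
    return Q @ np.diag(sigmas)

Z_well = make_ground_truth(kappa=1.0)    # left panel
Z_ill  = make_ground_truth(kappa=1e4)    # right panel
# run the sgd_step / scaled_sgd sketches on each and track
# f(X) = ||X X^T - Z Z^T||_F^2 once per epoch of d^2 samples
```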
Euclidean distance matrix (EDM) completion. The Euclidean distance matrix (EDM) is a matrix of pairwise distance between $d$ points in Euclidean space [51]. In applications such as wireless sensor networks, estimation of unknown distances, i.e., completing the EDM is often required. We
|
| 261 |
+
|
| 262 |
+
emphasize that this loss function is a pairwise loss, meaning that each measurement indexes multiple elements of the ground truth matrix.
|
| 263 |
+
|
| 264 |
+
To demonstrate the efficacy of ScaledSGD, we conduct two experiments where $D$ is well-conditioned and ill-conditioned respectively: Experiment 1. We uniformly sample 30 points in a cube center at origin with side length 2, and use them to compute the ground truth EDM $D$ . In this case, each row $x_{i} \in \mathbb{R}^{3}$ corresponds to the coordinates of the $i$ -th sample. The corresponding matrix $X \in \mathbb{R}^{30 \times 3}$ is well-conditioned because of the uniform sampling. Experiment 2. The ground truth EDM is generated with 25 samples lie in the same cube in experiment 1, and 5 samples lie far away from the cube. These five outliers make the corresponding $X$ become ill-conditioned.




Figure 2: Euclidean distance matrix (EDM) completion. We compare the convergence rates of ScaledSGD and SGD for EDM completion on two sets of samples. (Upper right) 30 samples uniformly distributed in the pink cube centered at the origin. (Upper left) 25 samples (in blue) uniformly distributed in the cube, with 5 outlier samples (in orange) outside of the cube. (Lower left) Sampling uniformly in the cube. (Lower right) Sampling with outliers.


Item-item collaborative filtering (CF). In the task of item-item collaborative filtering (CF), the ground truth $M$ is a $d \times d$ matrix, where $d$ is the number of items we wish to rank and the $(i, j)$-th entry of $M$ is a similarity measure between items $i$ and $j$. Our goal is to learn a low-rank matrix that preserves the ranking of similarities between the items. For instance, given a pairwise sample $(i, j, k)$, if item $i$ is more similar to item $j$ than to item $k$, then $M_{ij} > M_{ik}$. We want to learn a low-rank matrix that also has this property, i.e., whose $(i, j)$-th entry is greater than its $(i, k)$-th entry.
To gauge the scalability of ScaledSGD on a huge-scale real-world dataset, we perform simulations on item-item collaborative filtering using a $62,000 \times 62,000$ item-item matrix $M$ obtained from the MovieLens25M dataset. The CF model is trained using the Bayesian Personalized Ranking (BPR) loss [1] on a training set consisting of 100 million pairwise samples in $M$. The performance of the CF model is evaluated using the Area Under the ROC Curve (AUC) score [1] on a test set consisting of 10 million pairwise samples in $M$. The BPR loss is a widely used loss function in collaborative filtering for the task of personalized recommendation, and the AUC score is a popular evaluation metric for measuring the accuracy of a recommendation system. We defer the detailed definitions of the BPR loss and the AUC score to Appendix A.4.
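To make the objective concrete, the following minimal sketch (our own illustration; the paper's actual formulation and update equations are in Appendix A.4) evaluates the BPR loss of a factorization $XX^T$ on pairwise samples $(i, j, k)$ with $M_{ij} > M_{ik}$:

```python
import numpy as np

def bpr_loss(X, samples):
    """Average BPR loss of the model XX^T on triples (i, j, k), where
    item i is more similar to item j than to item k [1]."""
    total = 0.0
    for i, j, k in samples:
        margin = X[i] @ X[j] - X[i] @ X[k]   # score gap of the two pairs
        total += np.log1p(np.exp(-margin))   # equals -log sigmoid(margin)
    return total / len(samples)
```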
Figure 3 plots the training BPR loss and the testing AUC score within the first epoch (filled with red) and the second epoch (filled with blue). To measure the efficacy of ScaledSGD, we compare its testing AUC score against a standard baseline called the NP-Maximum [1], which is the best possible AUC score achievable by non-personalized ranking methods. For a rigorous definition, see Appendix A.4.

We emphasize two important points in Figure 3. First, the percentage of training samples needed for ScaledSGD to achieve the same testing AUC score as the NP-Maximum is roughly 4 times smaller than for SGD. Though both ScaledSGD and SGD achieve a higher AUC score than the NP-Maximum before finishing the first epoch, ScaledSGD matches the NP-Maximum after training on $11\%$ of the training samples, while SGD requires $46\%$ of them. Since the training set contains 100 million samples, this means that SGD requires 35 million more iterations than ScaledSGD before it can reach the NP-Maximum.

Second, the percentage of training samples needed for ScaledSGD to converge after the first epoch is roughly 5 times smaller than for SGD. Given that both ScaledSGD and SGD converge to an AUC score of around 0.9 within the second epoch (area filled with blue), we indicate in Figure 3 the percentage of training samples at which each algorithm reaches a 0.9 AUC score. As expected, ScaledSGD converges using fewer samples than SGD, requiring only $16\%$ of the training samples; SGD, on the other hand, requires $81\%$.
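On pairwise samples, the AUC score reduces to the fraction of test triples that the learned model orders correctly [1]; a minimal sketch of our own, reusing the factor matrix `X` from the BPR sketch above:

```python
def pairwise_auc(X, samples):
    """Fraction of test triples (i, j, k) that XX^T ranks correctly."""
    hits = sum(X[i] @ X[j] > X[i] @ X[k] for i, j, k in samples)
    return hits / len(samples)
```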


Figure 3: Huge-scale item-item collaborative filtering (MovieLens25M dataset with $|\Omega_{\text{train}}| = 100$ million and $|\Omega_{\text{test}}| = 10$ million pairwise measurements). We compare the training BPR loss and testing AUC score of ScaledSGD and SGD. (Left) Training BPR loss on the training set $\Omega_{\text{train}}$. (Right) Testing AUC score on the test set $\Omega_{\text{test}}$.


# 6 Conclusions

We propose an algorithm called ScaledSGD for huge-scale online matrix completion. In the nonconvex approach to matrix completion, ill-conditioning in the ground truth causes SGD to slow down significantly. ScaledSGD preserves all the favorable qualities of SGD while making it immune to ill-conditioning. For the RMSE loss, we prove that, with an initial point close to the ground truth, ScaledSGD converges to an $\epsilon$-accurate solution in $O(\log(1/\epsilon))$ iterations, independent of the condition number $\kappa$. We also run numerical experiments on a wide range of other loss functions commonly used in applications such as collaborative filtering and distance matrix recovery. We find that ScaledSGD achieves similar acceleration on these losses, which suggests that it is widely applicable to many real problems. It remains future work to provide rigorous justification for these observations.
# Acknowledgments

The authors thank Salar Fattahi for helpful discussions and feedback on an earlier draft. Financial support for this work was provided in part by the NSF CAREER Award ECCS-2047462 and in part by C3.ai Inc. and the Microsoft Corporation via the C3.ai Digital Transformation Institute.
# References

[1] Steffen Rendle, Christoph Freudenthaler, Zeno Gantner, and Lars Schmidt-Thieme. BPR: Bayesian personalized ranking from implicit feedback. arXiv preprint arXiv:1205.2618, 2012.
[2] Benjamin Recht, Christopher Re, Stephen Wright, and Feng Niu. Hogwild!: A lock-free approach to parallelizing stochastic gradient descent. Advances in Neural Information Processing Systems, 24, 2011.
[3] Qinqing Zheng and John Lafferty. Convergence analysis for rectangular matrix completion using Burer-Monteiro factorization and gradient descent. arXiv preprint arXiv:1605.07051, 2016.
[4] Tian Tong, Cong Ma, Ashley Prater-Bennette, Erin Tripp, and Yuejie Chi. Scaling and scalability: Provable nonconvex low-rank tensor completion. In International Conference on Artificial Intelligence and Statistics, pages 2607-2617. PMLR, 2022.
[5] Jialun Zhang, Salar Fattahi, and Richard Y Zhang. Preconditioned gradient descent for overparameterized nonconvex matrix factorization. Advances in Neural Information Processing Systems, 34:5985-5996, 2021.
[6] Jiacheng Zhuo, Jeongyeol Kwon, Nhat Ho, and Constantine Caramanis. On the computational and statistical complexity of over-parameterized matrix sensing. arXiv preprint arXiv:2102.02756, 2021.
[7] Michal Kosinski, David Stillwell, and Thore Graepel. Private traits and attributes are predictable from digital records of human behavior. Proceedings of the National Academy of Sciences, 110(15):5802-5805, 2013.
[8] Alexander Cloninger, Wojciech Czaja, Ruiliang Bai, and Peter J Basser. Solving 2D Fredholm integral from incomplete measurements using compressive sensing. SIAM Journal on Imaging Sciences, 7(3):1775-1798, 2014.
[9] Tian Tong, Cong Ma, and Yuejie Chi. Accelerating ill-conditioned low-rank matrix estimation via scaled gradient descent. Journal of Machine Learning Research, 22(150):1-63, 2021.
[10] Chi Jin, Sham M Kakade, and Praneeth Netrapalli. Provable efficient online matrix completion via non-convex stochastic gradient descent. Advances in Neural Information Processing Systems, 29, 2016.
[11] Emmanuel J Candes and Yaniv Plan. Matrix completion with noise. Proceedings of the IEEE, 98(6):925-936, 2010.
[12] Emmanuel J Candès and Terence Tao. The power of convex relaxation: Near-optimal matrix completion. IEEE Transactions on Information Theory, 56(5):2053-2080, 2010.
[13] Benjamin Recht, Maryam Fazel, and Pablo A Parrilo. Guaranteed minimum-rank solutions of linear matrix equations via nuclear norm minimization. SIAM Review, 52(3):471-501, 2010.
[14] Nathan Srebro and Adi Shraibman. Rank, trace-norm and max-norm. In International Conference on Computational Learning Theory, pages 545-560. Springer, 2005.
[15] Sahand Negahban and Martin J Wainwright. Restricted strong convexity and weighted matrix completion: Optimal bounds with noise. The Journal of Machine Learning Research, 13(1):1665-1697, 2012.
[16] Samuel Burer and Renato DC Monteiro. A nonlinear programming algorithm for solving semidefinite programs via low-rank factorization. Mathematical Programming, 95(2):329-357, 2003.
[17] Prateek Jain, Praneeth Netrapalli, and Sujay Sanghavi. Low-rank matrix completion using alternating minimization. In Proceedings of the Forty-Fifth Annual ACM Symposium on Theory of Computing, pages 665-674, 2013.
[18] Moritz Hardt and Mary Wootters. Fast matrix completion without the condition number. In Conference on Learning Theory, pages 638-678. PMLR, 2014.
[19] Moritz Hardt. Understanding alternating minimization for matrix completion. In 2014 IEEE 55th Annual Symposium on Foundations of Computer Science, pages 651-660. IEEE, 2014.
[20] Ruoyu Sun and Zhi-Quan Luo. Guaranteed matrix completion via non-convex factorization. IEEE Transactions on Information Theory, 62(11):6535-6579, 2016.
[21] Yudong Chen and Martin J Wainwright. Fast low-rank estimation by projected gradient descent: General statistical and algorithmic guarantees. arXiv preprint arXiv:1509.03025, 2015.
[22] Prateek Jain and Praneeth Netrapalli. Fast exact matrix completion with finite samples. In Conference on Learning Theory, pages 1007-1034. PMLR, 2015.
[23] Stephen Tu, Ross Boczar, Max Simchowitz, Mahdi Soltanolkotabi, and Ben Recht. Low-rank solutions of linear matrix equations via Procrustes flow. In International Conference on Machine Learning, pages 964-973. PMLR, 2016.
[24] Srinadh Bhojanapalli, Anastasios Kyrillidis, and Sujay Sanghavi. Dropping convexity for faster semi-definite optimization. In Conference on Learning Theory, pages 530-582. PMLR, 2016.
[25] Emmanuel J Candes, Xiaodong Li, and Mahdi Soltanolkotabi. Phase retrieval via Wirtinger flow: Theory and algorithms. IEEE Transactions on Information Theory, 61(4):1985-2007, 2015.
[26] Jianhao Ma and Salar Fattahi. Implicit regularization of sub-gradient method in robust matrix recovery: Don't be afraid of outliers. arXiv preprint arXiv:2102.02969, 2021.
[27] Srinadh Bhojanapalli, Behnam Neyshabur, and Nathan Srebro. Global optimality of local search for low rank matrix recovery. arXiv preprint arXiv:1605.07221, 2016.
[28] Qiuwei Li, Zhihui Zhu, and Gongguo Tang. The non-convex geometry of low-rank matrix optimization. Information and Inference: A Journal of the IMA, 8(1):51-96, 2019.
[29] Ju Sun, Qing Qu, and John Wright. A geometric analysis of phase retrieval. Foundations of Computational Mathematics, 18(5):1131-1198, 2018.
[30] Rong Ge, Jason D Lee, and Tengyu Ma. Matrix completion has no spurious local minimum. arXiv preprint arXiv:1605.07272, 2016.
[31] Rong Ge, Chi Jin, and Yi Zheng. No spurious local minima in nonconvex low rank problems: A unified geometric analysis. In International Conference on Machine Learning, pages 1233-1242. PMLR, 2017.
[32] Ji Chen and Xiaodong Li. Memory-efficient kernel PCA via partial matrix sampling and nonconvex optimization: a model-free analysis of local minima. arXiv preprint arXiv:1711.01742, 2017.
[33] Richard Y Zhang, Somayeh Sojoudi, and Javad Lavaei. Sharp restricted isometry bounds for the inexistence of spurious local minima in nonconvex matrix recovery. Journal of Machine Learning Research, 20(114):1-34, 2019.
[34] Richard Y Zhang. Sharp global guarantees for nonconvex low-rank matrix recovery in the overparameterized regime. arXiv preprint arXiv:2104.10790, 2021.
[35] Cédric Josz and Lexiao Lai. Nonsmooth rank-one matrix factorization landscape. Optimization Letters, pages 1-21, 2021.
[36] Raef Bassily, Mikhail Belkin, and Siyuan Ma. On exponential convergence of SGD in nonconvex over-parametrized learning. arXiv preprint arXiv:1811.02564, 2018.
[37] Sharan Vaswani, Francis Bach, and Mark Schmidt. Fast and faster convergence of SGD for over-parameterized models and an accelerated perceptron. In The 22nd International Conference on Artificial Intelligence and Statistics, pages 1195-1204. PMLR, 2019.
[38] Robert Mansel Gower, Nicolas Loizou, Xun Qian, Alibek Sailanbayev, Egor Shulgin, and Peter Richtárik. SGD: General analysis and improved rates. In International Conference on Machine Learning, pages 5200-5209. PMLR, 2019.
[39] Yuege Xie, Xiaoxia Wu, and Rachel Ward. Linear convergence of adaptive stochastic gradient descent. In International Conference on Artificial Intelligence and Statistics, pages 1475-1485. PMLR, 2020.
[40] Tian Tong, Cong Ma, and Yuejie Chi. Low-rank matrix recovery with scaled subgradient methods: Fast and robust convergence without the condition number. IEEE Transactions on Signal Processing, 69:2396-2409, 2021.
[41] Christian Kümmerle and Claudio M Verdun. A scalable second order method for ill-conditioned matrix completion from few samples. In International Conference on Machine Learning, pages 5872-5883. PMLR, 2021.
[42] Emmanuel J Candès and Benjamin Recht. Exact matrix completion via convex optimization. Foundations of Computational Mathematics, 9(6):717-772, 2009.
[43] Benjamin Recht. A simpler approach to matrix completion. Journal of Machine Learning Research, 12(12), 2011.
[44] Qinqing Zheng and John Lafferty. A convergent gradient descent algorithm for rank minimization and semidefinite programming from random linear measurements. Advances in Neural Information Processing Systems, 28, 2015.
[45] Yuejie Chi, Yue M Lu, and Yuxin Chen. Nonconvex optimization meets low-rank matrix factorization: An overview. IEEE Transactions on Signal Processing, 67(20):5239-5269, 2019.
[46] Cong Ma, Kaizheng Wang, Yuejie Chi, and Yuxin Chen. Implicit regularization in nonconvex statistical estimation: Gradient descent converges linearly for phase retrieval and matrix completion. In International Conference on Machine Learning, pages 3345-3354. PMLR, 2018.
[47] Jason D Lee, Ioannis Panageas, Georgios Piliouras, Max Simchowitz, Michael I Jordan, and Benjamin Recht. First-order methods almost always avoid strict saddle points. Mathematical Programming, 176(1):311-337, 2019.
[48] Chi Jin, Rong Ge, Praneeth Netrapalli, Sham M Kakade, and Michael I Jordan. How to escape saddle points efficiently. In International Conference on Machine Learning, pages 1724-1732. PMLR, 2017.
[49] Chi Jin, Praneeth Netrapalli, Rong Ge, Sham M Kakade, and Michael I Jordan. On nonconvex optimization for machine learning: Gradients, stochasticity, and saddle points. Journal of the ACM (JACM), 68(2):1-29, 2021.
[50] F. Maxwell Harper and Joseph A. Konstan. The MovieLens datasets: History and context. ACM Trans. Interact. Intell. Syst., 5(4), Dec 2015. ISSN 2160-6455. doi: 10.1145/2827872. URL https://doi.org/10.1145/2827872.
[51] Ivan Dokmanic, Reza Parhizkar, Juri Ranieri, and Martin Vetterli. Euclidean distance matrices: essential theory, algorithms, and applications. IEEE Signal Processing Magazine, 32(6):12-30, 2015.
[52] James Davidson, Benjamin Liebold, Junning Liu, Palash Nandy, Taylor Van Vleet, Ullas Gargi, Sujoy Gupta, Yu He, Mike Lambert, Blake Livingston, et al. The YouTube video recommendation system. In Proceedings of the Fourth ACM Conference on Recommender Systems, pages 293-296, 2010.
[53] Greg Linden, Brent Smith, and Jeremy York. Amazon.com recommendations: Item-to-item collaborative filtering. IEEE Internet Computing, 7(1):76-80, 2003.
[54] Brent Smith and Greg Linden. Two decades of recommender systems at Amazon.com. IEEE Internet Computing, 21(3):12-18, 2017.
[55] Mark A Davenport, Yaniv Plan, Ewout Van Den Berg, and Mary Wootters. 1-bit matrix completion. Information and Inference: A Journal of the IMA, 3(3):189-223, 2014.
acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:029481da407bbecb02ba3feb3b5cc71cff4d0041f07717482a50e02a98c11496
size 433493
acceleratingsgdforhighlyillconditionedhugescaleonlinematrixcompletion/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:649b029fae2ef625a6ff37e179c69ca209d051e7f560fba2864893f4388009d3
size 586503
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9820f6f3aaf79c223b8ae3cd8597776e268d3533b6dd08d8df5442999caaaa6
size 59986
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eef197f3d992f7d22ac89c53075894a93a07616e599b588df290467858705b05
size 75251
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/ccf4c0da-5b53-47d7-8863-b256ae5714be_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7d8f2b3afc39c8b3c254f20e61b5087dc32a1fba966f7d221eb524edac2f4284
size 516303
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/full.md
ADDED
@@ -0,0 +1,278 @@
# Accelerating Sparse Convolution with Column Vector-Wise Sparsity
Yijun Tan $^{1,2}$ Kai Han $^{3}$ Kang Zhao $^{3}$ Xianzhi Yu $^{3}$ Zidong Du $^{1}$ Yunji Chen $^{1,2}$ Yunhe Wang $^{3,*}$ Jun Yao $^{3}$

$^{1}$ SKL of Processors, Institute of Computing Technology, CAS

$^{2}$ University of Chinese Academy of Sciences

$^{3}$ Huawei Noah's Ark Lab

tanyj1998@gmail.com, {kai.han,yunhe.wang}@huawei.com
# Abstract

Weight sparsity is a promising approach to reducing the model size and computation cost of convolutional neural networks (CNNs). Nevertheless, non-zero weights often distribute randomly in sparse CNN models, introducing enormous difficulty in obtaining actual speedup on common hardware (e.g., GPU) over their dense counterparts. Existing acceleration solutions either require hardware modifications for irregular memory access support or rely on a partially structured sparsity pattern. Neither of these methods is capable of achieving substantial speedup on convolution layers.

In this work, we propose an algorithm-software co-designed sparse convolution based on a novel out-vector-wise (OVW) sparse pattern. Building on the insight that vertical vector integrity can preserve continuous memory access in IM2COL, the OVW pattern treats a $V \times 1$ vector as a unit. To reduce the error caused by sparsity, we propose an equivalent transformation process, i.e., clustering-based channel permutation, to gather similar rows together. Experimental evaluations demonstrate that our method achieves $1.7 \times$ and $3.2 \times$ speedups over the SOTA solution and the dense convolution of ResNet50 on NVIDIA V100 at $75\%$ sparsity, respectively, with only negligible accuracy loss. Moreover, compared to the SOTA solution, which achieves speedups only on data with $60\%$ sparsity or more, our method begins to obtain speedups on data with only $10\%$ sparsity.
# 1 Introduction

Recently, convolutional neural networks (CNNs) have yielded astonishing results in many important domains such as vision [8] and language [18]. As CNN algorithms develop rapidly, the storage and computation overhead of CNN models grows dramatically. To significantly reduce both the computations and the memory accesses, weight sparsity has been adopted as a promising approach to improve hardware efficiency.

Despite its success in reducing computations and data accesses, unconstrained, fine-grained sparsity fails to bring practical speedups on common GPUs. This is because unstructured sparsity generally induces tremendous access conflicts and load imbalance, which lowers GPU performance. For example, on NVIDIA V100, sparse matrix multiplication is no faster than dense matrix multiplication until the sparsity ratio exceeds $95\%$ [17, 3].

Unfortunately, existing solutions either require hardware modifications or only partially address the problem by being constrained to structured, coarse-grained sparsity, resulting in high accuracy loss. The former approach leverages the sparse matrix-matrix multiplication (e.g., SPMM) operation on GPU. While directly applying SPMM to sparse CNNs can run even slower than dense CNNs [17], productive SPMM acceleration solutions [21, 14] often require dedicated hardware support to overcome discontinuous memory access, which is impractical. The latter approach leverages the general matrix-matrix multiplication (e.g., GEMM) operation on GPU.

Recent works focus on structured sparsity with different sparse patterns to gain speedup benefits from weight sparsity. Block sparsity [4] manages to restore the spatial locality of matrices to a large extent, at the cost of a strict restriction on the non-zero pattern. Balanced sparsity [2, 19, 15], newly supported on the NVIDIA A100 GPU [14], however, lacks flexibility in choosing the model sparsity ratio, as only an exact $50\%$ sparsity ratio can be deployed on this dedicated hardware. These efforts achieve some palpable acceleration compared to the dense GEMM operation, but they all struggle to attain similar results on convolution layers, which have proven to be a greater challenge.

To tackle these problems, we present a novel sparse convolution acceleration algorithm featuring column-wise sparsity and implicit matrix multiplication. Specifically, the proposed column-wise sparsity is dubbed the out-vector-wise (OVW) sparse pattern, since the pattern sparsifies a matrix by treating a $\mathrm{V} \times 1$ vector as an entirety, as shown in Figure 1. During convolution, the OVW pattern can maintain both strong memory consistency and high data reuse rates of input matrices using implicit matrix multiplication. Moreover, we propose to employ channel permutation and row clustering to improve the accuracy of OVW sparse pattern-based CNNs. Besides, a GPU kernel is carefully designed to ensure that our OVW sparse pattern is supported by common GPUs. With these efforts, our algorithm consistently outperforms other sparse convolution acceleration algorithms on various CNN models. More importantly, our algorithm can accelerate convolutions even with a very low weight sparsity ratio, e.g., $10\%$, whereas prior approaches only work well when the weight sparsity ratio is over $60\%$.

The main contributions of this paper are listed as follows:
- We propose a vector-based sparsity pattern, the OVW pattern, to balance inference accuracy loss and computation efficiency in a hardware-friendly manner.
- We implement a new GPU convolution kernel to support the OVW pattern. The kernel utilizes a technique for extracting filter location information, which further reduces inference runtime.
- We propose a heuristic clustering method to obtain an appropriate channel permutation for reducing the accuracy drop during weight pruning. This channel permutation is conducted offline and does not affect inference time.
- Our GPU kernel can accelerate convolution across a wide range of model sparsity ratios. With negligible accuracy loss, the kernel speeds up ResNet50 by $1.7 \times$ and $3.2 \times$ over the SOTA solution and the dense cuDNN convolution, respectively, on an NVIDIA V100 GPU at the $75\%$ sparsity level.
# 2 Related work

# 2.1 Software-only Acceleration For Sparse CNN Models

Weight pruning has been a popular technique for efficient CNN inference. Early studies [7, 6] show that removing a large proportion of unimportant connections in CNN models does not necessarily lead to inference accuracy impairment. Reducing parameters helps exploit redundancy in CNN models, which requires fewer computations and data accesses. However, for CNN inference, weight-pruned sparse CNNs usually perform worse than their dense counterparts unless the sparsity ratios are substantial, i.e., for very sparse CNNs.

To address this issue, methods other than unstructured sparsity are exploited. Researchers explore various constraints on sparsity patterns in exchange for computation efficiency. A primary approach in this domain is filter pruning, where the parameters of an entire filter are pruned or kept as a whole; however, this direct modification of channel size suffers a sharp accuracy drop [10, 11, 16]. Moderate sparsity patterns are also examined, such as block sparsity [17], which is proposed to elevate the spatial locality of sparse matrices but achieves speedup only when sparsity ratios are larger than $70\%$. Tile-wise sparsity [5] endows weight patterns with more flexibility. Compared to previous methods, balanced sparsity [15, 19] is more practical given recent support from the NVIDIA A100 GPU, which

Element-Wise


Inter-Vector-Wise


Outer-Vector-Wise


Block-Wise


Pruned


Unpruned

Figure 1: Comparison of four patterns with $50\%$ sparsity.
directly optimizes 2:4 balanced sparsity. The recent work Shfl_BW [9] uses matrix transformation to exploit block sparsity's computation efficiency while removing some of its constraints; in this way, the threshold of weight sparsity ratio that enables acceleration is reduced from $70\%$ to $60\%$. Different from prior works, which only work for very sparse matrices, our algorithm achieves speedup when the sparsity ratio is as low as $10\%$.

# 2.2 GEMM-Based Convolution

GEMM has been adopted widely to perform convolution, and it performs significantly better than other convolution methods such as FFT and Winograd on modern commercial hardware accelerators such as TPUs and GPUs. GEMM-based algorithms can be further divided into two types: explicit matrix multiplication and implicit matrix multiplication. Explicit matrix multiplication uses IM2COL to adapt inputs for GEMM; IM2COL is an IO-intensive operation, which brings in a significant workload beyond the computation cost [1]. Implicit matrix multiplication merges these operations for more efficient memory accesses: it updates pointers into the feature map in shared memory and performs tile-based matrix multiplication simultaneously. On NVIDIA V100 GPUs, explicit GEMM convolution consumes on average $120\%$, $126\%$, and $142\%$ of the time of implicit GEMM-based convolution on the convolution layers of AlexNet, ResNet, and GoogLeNet [20].

Yet, few studies have investigated sparse convolution with implicit GEMM. Performing sparse convolution by GEMM has always gone through explicit rather than implicit matrix multiplication, because the IM2COL operation is extremely difficult, if not impossible, for sparse matrix multiplication, as sparse matrices are compactly compressed and irregularly stored. As a result, implicit GEMM, which does not suffer from the costly IM2COL operation, has the potential to achieve higher efficiency for sparse convolution. In this paper, we investigate implicit GEMM-based sparse convolution to leverage the high-performance GEMMs on existing hardware.
# 3 Accelerating Sparse Convolution

In this section, we introduce our proposed sparse convolution algorithm, including the OVW sparsity pattern for the proposed sparse convolution, its advantage in convolution computation, and our detailed implementation on GPU.

# 3.1 The OVW Pattern

The OVW pattern belongs to the vector-wise (VW) pattern, which is one of three different categories of sparsity patterns in a matrix. As shown in Fig 1, the first sparsity pattern is the element-wise (EW) pattern, corresponding to unstructured pruning, which evaluates each parameter individually. Imposing no constraint on pruning, this pattern succeeds at model flexibility but struggles at actual acceleration due to its irregular memory accesses. The second sparsity pattern is the VW pattern, which can be further divided into the inter-vector-wise (IVW) and the OVW pattern. Both treat a $\mathrm{V} \times 1$ vector as an entirety, but the IVW pattern prunes a certain proportion of weights inside each vector, while the OVW pattern prunes entire vectors of weights. The third pattern is the block-wise (BW) pattern, whose minimum pruning granularity is a $\mathrm{V} \times \mathrm{V}$ block; this pattern has the highest computation efficiency, but its inference accuracy loss is high as well. In this work, we


Figure 2: Illustration of the GPU conv kernel. The solid lines show the actual computation flow and the dotted lines show the processing of pointers and descriptors.

use the OVW pattern since it shares the advantages of the VW pattern, balancing the computation efficiency of BW against the network accuracy of EW. The Shfl_BW pattern is in fact a variant of this pattern that uses an extra channel reordering procedure to recover block-wise pattern utility.
# 3.2 The OVW Pattern's Advantage in Convolution Computation

The biggest advantage of the OVW pattern is that it fits the way an efficient dense warp-level GEMM instruction fetches input data. This instruction is the key contributor to most sparse matrix acceleration methods. The reasons are as follows.

As shown in Fig 2, OVW pattern-based sparse convolution can be broken down into multiple dense matrix multiplications of smaller sizes. During the loading process of a dense convolution procedure, a column of filter data loaded into shared memory shares a specific position on the filter map. In the meantime, a continuous block in the feature map is loaded accordingly to prepare for convolution. Several columns fetched from the filters together form the left input matrix of the block matrix multiplication, and their corresponding feature data blocks form the right input matrix. Notice that this process of forming the input matrices does not require the loaded filter columns to be contiguous, meaning that efficient dense operations can also be performed by grouping unrelated columns. Based on this observation, we can take in multiple columns with unrelated column indices from the OVW-pattern sparse matrix and handle them in the same way as the dense GEMM operation. This similarity between our convolution algorithm and the implicit GEMM convolution guarantees similar overall computation efficiency.
Moreover, whereas the Shfl_BW pattern needs an extra channel reordering procedure to recover block-wise utility, the OVW pattern can be used directly in our convolution algorithm, which indicates a higher acceleration potential. Compared to N:M sparsity, an instance of the IVW pattern, our approach needs no specialized hardware support and is much more flexible in selecting the sparsity ratio of each layer. Besides, the IVW pattern still faces a memory-bound issue, because the fraction of redundant data that must be loaded into shared memory each time equals its sparsity ratio.
# 3.3 GPU Sparse Kernel Implementation

As shown in Algorithm 1, our convolution kernel implementation contains three steps. The first step is to get the corresponding feature pointer offsets by recovering the original filter structure information; this calculation is done by the function Cal_Thread_Offset. The second step is to load data from both input matrices into shared memory: some threads use the function Load_column to load a column vector of length $TM$ from the filters to shared memory with $DY$ threads, and threads then use the function Load_row to load a row vector from the feature map in the same way. The third step calls the warp matrix multiplication operators. When calculating the matrix product $A_{M\times K} \times B_{K\times N} = C_{M\times N}$, a three-dimensional thread parallelism $(DX, DY, DZ)$ is employed: $(DX, DY, DZ)$ threads loop along $(M, N, K)$, respectively. Several threads together use the function Warp_MMA to multiply the loaded matrices and write back after result accumulation is finished. Each thread computes a tile matrix multiplication of size $(TM, TN, TK)$.

The procedure for calculating the exact pointer offset into the feature map for the corresponding filter columns contains two steps. During a convolution computation, the location corresponding to $a_{ij} \times b_{jk}$ is not obvious. Hence, after loading $a_{ij}$ into shared memory, the GPU kernel first fetches the column index of $a_{ij}$ in the original filter. The column index is then used to recover the exact position of this column in the filter map. Subsequently, the location offset of the corresponding data in the feature map is calculated, after which $b_{jk}$ can finally be located in the feature map.
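The index arithmetic can be sketched as follows (our own illustration, assuming a flattened $C \times H \times W$ feature map, stride-controlled output coordinates, and no padding; the kernel's actual memory layout may differ):

```python
def feature_offset(col_idx, C, R, S, H, W, out_h, out_w, stride=1):
    """Recover the feature-map offset for a kept filter column."""
    # Step 1: recover the (channel, row, column) position in the filter
    # map from the flattened column index of the compressed filter matrix.
    c = col_idx // (R * S)
    r = (col_idx % (R * S)) // S
    s = col_idx % S
    # Step 2: the input pixel this filter tap touches for output (out_h, out_w).
    in_h = out_h * stride + r
    in_w = out_w * stride + s
    # Flattened offset into a C x H x W feature map.
    return (c * H + in_h) * W + in_w
```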
Algorithm 1: Sparse convolution computation

```text
Data: row_idx[], filter[], input[]; Result: output[]
1  Shared memory A[TM][TK], B[TK][TN], C[TM][TN];
2  for Thread idx = 1 to DX, idy = 1 to DY, idz = 1 to DZ do
3      offset = Cal_Thread_Offset(row_idx[], idx, idy, idz);
4      if idx < TK then
5          Load_column(A, filter[idz][idx], TM, idy);
6      end
7      if idy < TK then
8          Load_row(B, input[offset], TN, idx);
9      end
10     Syncthreads();
11     Warp_MMA(A, B, idx, idy);
12     Accumulate_Results(C);
13     Store(output, C);
14 end
15 Return output;
```
If only the column indices of the sparse matrices are stored, their location information has to be recovered each time before the corresponding activation is loaded. As with the location offset in the feature map, the corresponding data location in the filter map can be prepared in advance, because it is constant for each thread during the whole process. The extra storage cost of this technique is two additional dimension-index arrays for the filter map, which take merely $3\%$ of the total storage of a compressed model with vector length $= 64$, and $6\%$ with vector length $= 32$, in exchange for a $10\%$ average runtime reduction on ResNet50's convolution layers. Considering that a sparse model is already highly compressed, this additional model redundancy is entirely acceptable.
# 4 Pruning Algorithm

In this section, we introduce our pruning algorithm for the OVW pattern, including the channel permutation technique and our method for acquiring a desired permutation order for it.

# 4.1 Channel Permutation

Our pruning method can be divided into two steps: shuffling the filter matrix rows in each layer, and applying vector-wise pruning.
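The vector-wise pruning step can be sketched as follows (our own illustration; the magnitude criterion and the L1 score are our assumptions, chosen only to make the sketch concrete):

```python
import numpy as np

def ovw_prune(W, V, sparsity):
    """Prune V x 1 column vectors of W with the smallest L1 magnitude.
    W is (out_channels, in_features); out_channels divisible by V."""
    O, K = W.shape
    tiles = W.reshape(O // V, V, K)
    score = np.abs(tiles).sum(axis=1)        # one score per V x 1 vector
    thresh = np.quantile(score, sparsity)
    mask = (score >= thresh)[:, None, :]     # keep or drop whole vectors
    return (tiles * mask).reshape(O, K)

W_sparse = ovw_prune(np.random.default_rng(0).standard_normal((128, 64)),
                     V=64, sparsity=0.75)
```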
Here we explain why filter permutation does no harm to network inference. Permutation only swaps the order of the output dimension of a matrix multiplication and does not change the actual computation; the permuted results can be recovered through a reverse permutation applied to the consumer of the output. As we only permute the output channels of each layer, the permuted order of the current layer will be



Figure 3: Illustration of the permutation transfer process. The permuted matrix retains a larger total absolute weight after pruning at the same sparsity. The following activation is recovered by permuting the output channels of layer $k$ and the input channels of layer $k + 1$ simultaneously.

absorbed by the input channel dimension of the next GEMM-based layer (convolution or linear). Fig 3 shows one iteration of channel permutation between layer $k$ and layer $k+1$. As we have reordered the output channels of layer $k$, the activation of layer $k$ is changed to the same order, but after we permute the input channels of layer $k+1$, the activation of layer $k+1$ is restored. More weight magnitude can be preserved after permutation. The same operation is then repeated on layer $k+1$, and so on, until every GEMM layer in the network is permuted. This permutation transfer procedure allows us to choose an appropriate row permutation for every filter without altering the output of the network.
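The invariance is easy to verify numerically (a minimal sketch of our own, using fully connected layers as stand-ins for GEMM-based layers; ReLU commutes with channel permutation because it acts elementwise):

```python
import numpy as np

rng = np.random.default_rng(0)

# Two consecutive GEMM-like layers: y = W2 @ relu(W1 @ x).
W1 = rng.standard_normal((16, 8))
W2 = rng.standard_normal((4, 16))
x = rng.standard_normal(8)

perm = rng.permutation(16)
W1_p = W1[perm]        # permute output channels of layer k
W2_p = W2[:, perm]     # permute input channels of layer k+1

relu = lambda v: np.maximum(v, 0)
# The permutation cancels out: the network output is unchanged.
assert np.allclose(W2 @ relu(W1 @ x), W2_p @ relu(W1_p @ x))
```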
The remaining layers, such as pooling layers and activation layers, involve no modification along the channel dimension and thus are not affected by this process. The BN layers and the biases added at the end of convolution and linear layers do not produce any new permutation order, but they have to be permuted according to the permutation passing through.
# 4.2 Row Clustering
Algorithm 2: Row clustering

Data: The original weight $W$, number of clusters $k$, number of selected columns $m$. Result: The reordered weight $RW$.

1 $RW =$ empty;
2 while $W$ is not empty do
3     Sort the columns of $W$ by column variance;
4     Build SampleW by selecting columns with the top-$m$ largest variances;
5     Get the $k$ clustered groups $G =$ Balanced_kmeans(SampleW, $k$);
6     Select the group $g$ with maximum sum;
7     Append $g$ to $RW$;
8     Remove $g$ from $W$;
9 end
10 Return $RW$;
We use a row clustering method to obtain an appropriate permutation order. A heuristic indicator for evaluating the quality of a permutation is the sum of the absolute weight values being pruned, under the assumption that preserving more important weights corresponds to less inference accuracy loss. An obvious route is to assign weight rows with shorter pairwise distances to the same group as much as possible. Shfl_BW chooses the k-means method for clustering, but after careful experimentation, we found that k-means does not suit this problem well. For starters, the number of elements in each group must be a fixed value (the vector length), and k-means requires additional operations to meet this demand. Also, the data dimension (input channels times filter height and width) is large, while the number of rows and groups is relatively small; k-means falls into local minima easily and the resulting clustering is extremely unstable. We introduce balanced k-means [13] and modify it to alleviate both symptoms mentioned above.

Table 1: Network accuracy on Cifar100. V is the vector length of the OVW pattern. V for Vgg19 and Resnet is set to 64 for all layers. For SqueezeNet and MobileNetv2, the V value in the table is the average vector length over the whole network, as we select an optimal V for each layer.

<table><tr><td colspan="2">Network</td><td>Vgg19</td><td>Resnet18</td><td>Resnet50</td><td>SqueezeNet</td><td>MobileNetv2</td></tr><tr><td>Baseline</td><td>dense</td><td>71.41</td><td>77.19</td><td>78.60</td><td>71.01</td><td>68.99</td></tr><tr><td rowspan="2">Unstructured OVW</td><td rowspan="2">80%</td><td>71.62(±0.11)</td><td>76.42(±0.04)</td><td>77.99(±0.08)</td><td>70.27(±0.15)</td><td>68.89(±0.06)</td></tr><tr><td>71.36(±0.04)</td><td>73.67(±0.35)</td><td>75.80(±0.16)</td><td>69.05(±0.12)</td><td>68.31(±0.11)</td></tr><tr><td>OVW permuted</td><td></td><td>71.46(±0.06)</td><td>74.23(±0.22)</td><td>75.99(±0.18)</td><td>69.29(±0.15)</td><td>68.52(±0.05)</td></tr><tr><td>Δ</td><td></td><td>+0.10</td><td>+0.56</td><td>+0.19</td><td>+0.24</td><td>+0.21</td></tr><tr><td>V</td><td></td><td>64</td><td>64</td><td>64</td><td>44.48</td><td>32.02</td></tr><tr><td rowspan="2">Unstructured OVW</td><td rowspan="2">90%</td><td>71.35(±0.05)</td><td>74.87(±0.07)</td><td>78.02(±0.04)</td><td>70.29(±0.16)</td><td>68.40(±0.19)</td></tr><tr><td>71.29(±0.03)</td><td>71.14(±0.10)</td><td>72.72(±0.13)</td><td>65.69(±0.59)</td><td>66.07(±0.64)</td></tr><tr><td>OVW permuted</td><td></td><td>71.37(±0.19)</td><td>72.25(±0.15)</td><td>73.26(±0.22)</td><td>65.80(±0.73)</td><td>67.46(±1.03)</td></tr><tr><td>Δ</td><td></td><td>+0.08</td><td>+1.11</td><td>+0.54</td><td>+0.11</td><td>+1.39</td></tr><tr><td>V</td><td></td><td>64</td><td>64</td><td>64</td><td>44.48</td><td>32.02</td></tr></table>

Algorithm 2 shows the key steps of our method. First, we construct a characteristic matrix by selecting the columns with the highest variance, then cluster the rows of this matrix to alleviate the excessive dimensionality. We utilize balanced k-means to get equal-size clusters: in each iteration, instead of assigning each data vector to its nearest cluster center as in the original k-means algorithm, a distance matrix between all the vectors and the current cluster centers is formed, and we minimize the total distance while requiring each cluster to contain the same number of data vectors. This minimization problem can be converted to bipartite matching, and we employ the Kuhn-Munkres algorithm to solve it. Second, for each clustering result, we only adopt the most important group under this feature, to increase the stability of the operation. This group is removed from the original matrix, and the feature matrix is then reconstructed and clustered again. Repeating the above steps until all rows are grouped yields the permuted matrix and its permutation order.
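The balanced assignment step inside Balanced_kmeans can be sketched with SciPy's Kuhn-Munkres solver (our own illustration; it assumes the number of rows is divisible by $k$):

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def balanced_assign(rows, centers):
    """One balanced-assignment step: each of the k centers receives
    exactly len(rows) // k rows, minimizing total squared distance,
    solved as an assignment problem (Kuhn-Munkres)."""
    n, k = len(rows), len(centers)
    per = n // k
    # Distance of every row to every center; replicate each center's
    # column 'per' times so each center owns exactly 'per' slots.
    d = ((rows[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
    cost = np.repeat(d, per, axis=1)          # n x (k * per)
    r, c = linear_sum_assignment(cost)
    return c[np.argsort(r)] // per            # group id per row
```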
# 5 Evaluation

# 5.1 Model Accuracy

We evaluate our method on several popular CNN models on an NVIDIA V100 GPU; we only measure the speedup of the convolution layers in the following results. Table 1 shows the accuracy of our method compared to unstructured sparsity, where V stands for the vector length. "OVW permuted" shows better accuracy than "OVW non-permuted" on all CNN models.

The upper bound of V in our kernel implementation is 64, and we tend to make it as large as possible to maximize shared memory usage. However, group convolution has no filter data reuse for vector lengths larger than its group size; similarly, V is set to 1 in depthwise convolution layers. Besides that, the first convolution layer of SqueezeNet has only 96 output channels: if the vector length were 64, the second tile in the output channel dimension would process only 32 rows and be only half loaded, so the vector length is set to 32 to maximize the utilization of computation resources. The V selection strategy here is to maximize V while utilizing shared memory resources as much as possible.

All the results in this table use the same fine-tuning process: we fine-tune each network for 40 epochs after pruning with the same learning rate of 0.0008. Also, each layer can hold a different sparsity ratio, thanks to our acceleration of convolution even at low sparsity ratios.




Figure 4: Speed comparison between our sparse convolution kernel and two GEMM-based convolutions in the cuDNN library on an NVIDIA V100 GPU.


Table 2: Comparing sparsity patterns on Resnet50 for ImageNet classification. The speedup of unstructured sparsity is the same as dense because the fastest way to run it is to invoke the dense convolution kernel.

<table><tr><td>Network</td><td>Sparsity ratio</td><td>Accuracy</td><td>Speedup</td></tr><tr><td>Baseline</td><td>Dense</td><td>76.12</td><td>1×</td></tr><tr><td>Balanced sparsity (2:4)[15]</td><td>50%</td><td>76.29</td><td>1.3×</td></tr><tr><td>Shfl_BW[9]</td><td>90%</td><td>73.09</td><td>2.5×</td></tr><tr><td>OVW</td><td>30%</td><td>76.17</td><td>1.38×</td></tr><tr><td>OVW</td><td>50%</td><td>75.76</td><td>1.86×</td></tr><tr><td>OVW</td><td>70%</td><td>73.35</td><td>2.79×</td></tr></table>

Figure 5: Speedup against the dense baseline on Resnet50/Cifar100, compared to other vector-level sparse patterns, on an NVIDIA V100 GPU.
# 5.2 Convolution Kernel Speedup

As shown in Fig 4, we evaluate the speedup of our method on three popular CNN models, using the cuDNN convolution operator as the dense baseline. The first three graphs in Fig 4 represent three typical convolution shapes in CNN models: small channel size with a large feature map, medium channel size with a medium feature map, and large channel size with a small feature map. Our kernel accelerates all these types of convolution layers, and excels at the twelfth convolution layer of Resnet50, the most common kind of convolution layer, reaching $4.8 \times$ and $3.87 \times$ speedups over cuDNN on V100 at $80\%$ sparsity with vector lengths 64 and 32, respectively.
# 5.3 Comparing Different Sparsity Patterns

We replicate two vector-level sparsity patterns, balanced sparsity (NVIDIA 2:4) [15] and Shfl_BW [9], for comparison. Other vector-level sparsity patterns, such as Tile-wise [5], are slower than these two patterns and also lack convolution implementations.

Table 2 shows results of Resnet50 on ImageNet copied directly from the Shfl_BW paper, where an expensive, sparsity-pattern-independent method, Grow and Prune [12], is used to recover accuracy. We fine-tune our pretrained Resnet50 model for 20 epochs with a learning rate of 0.001. Lowering the sparsity of our network to $70\%$, our method demonstrates $73.35\%$ top-1 accuracy and a $2.79 \times$ speedup, an obviously better speed-accuracy tradeoff than Shfl_BW. The OVW pattern also achieves a better speedup than balanced sparsity while recovering the full accuracy of the original model.

To ensure a fair comparison, we reproduce these results under the same setting on Resnet50 and Cifar100, as shown in Fig 5. We fine-tune each network for 40 epochs after pruning from pretrained dense models with the same learning rate of 0.0008. The OVW pattern dominates the speed-accuracy trade-off in vector-level sparsity.
# 6 Conclusion

Accelerating sparse convolution poses a greater challenge than accelerating sparse matrix multiplication. In this work, we propose a novel sparsity pattern, the OVW pattern, to facilitate sparse convolution acceleration with intact accuracy. Limitations exist: our method relies heavily on hardware support for the implicit GEMM convolution algorithm; the performance of our base dense kernel relative to unpublished commercial kernels is unstable; our method does not achieve the same acceleration rate on plain matrix multiplication, and its acceleration rate depends on the filter shape; and its performance degrades in specialized convolution layers where the opportunity for data reuse is limited. Even so, our GPU implementation still largely outperforms all sparse acceleration approaches with a sparsity pattern of similar flexibility. We hope this work can fill the vacancy in specialized sparse convolution kernel design and that our methodology can inspire further research in this domain.
# Acknowledgement

We gratefully acknowledge the support of MindSpore, CANN (Compute Architecture for Neural Networks) and the Ascend AI Processor used for this research.

This work is partially supported by the National Key R&D Program of China (under Grant 2017YFA0700902), the NSF of China (under Grants 61925208, 61732020, U19B2019), the Strategic Priority Research Program of the Chinese Academy of Sciences (XDB32050200), the Beijing Academy of Artificial Intelligence (BAAI), CAS Project for Young Scientists in Basic Research (YSBR-029), the Youth Innovation Promotion Association CAS, and the Xplore Prize.
# References
[1] MegEngine: A fast, scalable and easy-to-use deep learning framework. 2020.
[2] Cao Shijie, Zhang Chen, Yao Zhuliang, Xiao Wencong, Nie Lanshun, Zhan Dechen, Liu Yunxin, Wu Ming, Zhang Lintao. Efficient and effective sparse LSTM on FPGA with bank-balanced sparsity // Proceedings of the 2019 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays. 2019. 63-72.
[3] Gale Trevor, Zaharia Matei, Young Cliff, Elsen Erich. Sparse GPU kernels for deep learning // SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. 2020. 1-14.
[4] Gray Scott, Radford Alec, Kingma Diederik P. GPU kernels for block-sparse weights // arXiv preprint arXiv:1711.09224. 2017.
[5] Guo Cong, Hsueh Bo Yang, Leng Jingwen, Qiu Yuxian, Guan Yue, Wang Zehuan, Jia Xiaoying, Li Xipeng, Guo Minyi, Zhu Yuhao. Accelerating sparse DNN models without hardware-support via tile-wise sparsity // SC20: International Conference for High Performance Computing, Networking, Storage and Analysis. 2020. 1-15.
[6] Han Song, Mao Huizi, Dally William J. Deep compression: Compressing deep neural networks with pruning, trained quantization and Huffman coding // arXiv preprint arXiv:1510.00149. 2015.
[7] Han Song, Pool Jeff, Tran John, Dally William. Learning both weights and connections for efficient neural network // Advances in Neural Information Processing Systems. 2015. 28.
[8] He Kaiming, Zhang Xiangyu, Ren Shaoqing, Sun Jian. Deep residual learning for image recognition // Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2016. 770-778.
[9] Huang Guyue, Li Haoran, Qin Minghai, Sun Fei, Ding Yufei, Xie Yuan. Shfl-BW: Accelerating deep neural network inference with tensor-core aware weight pruning // arXiv preprint arXiv:2203.05016. 2022.
[10] Li Hao, Kadav Asim, Durdanovic Igor, Samet Hanan, Graf Hans Peter. Pruning filters for efficient convnets // arXiv preprint arXiv:1608.08710. 2016.
[11] Liu Zhuang, Sun Mingjie, Zhou Tinghui, Huang Gao, Darrell Trevor. Rethinking the value of network pruning // arXiv preprint arXiv:1810.05270. 2018.
[12] Ma Xiaolong, Qin Minghai, Sun Fei, Hou Zejiang, Yuan Kun, Xu Yi, Wang Yanzhi, Chen Yen-Kuang, Jin Rong, Xie Yuan. Effective model sparsification by scheduled grow-and-prune methods // arXiv preprint arXiv:2106.09857. 2021.
[13] Malinen Mikko I, Franti Pasi. Balanced k-means for clustering // Joint IAPR International Workshops on Statistical Techniques in Pattern Recognition (SPR) and Structural and Syntactic Pattern Recognition (SSPR). 2014. 32-41.
[14] Mishra Asit, Latorre Jorge Albericio, Pool Jeff, Stosic Darko, Stosic Dusan, Venkatesh Ganesh, Yu Chong, Micikevicius Paulius. Accelerating sparse deep neural networks // arXiv preprint arXiv:2104.08378. 2021.
[15] Pool Jeff, Yu Chong. Channel permutations for N:M sparsity // Advances in Neural Information Processing Systems. 2021. 34.
[16] Sui Yang, Yin Miao, Xie Yi, Phan Huy, Aliari Zonouz Saman, Yuan Bo. CHIP: Channel independence-based pruning for compact neural networks // Advances in Neural Information Processing Systems. 2021. 34.
[17] Wen Wei, Wu Chunpeng, Wang Yandan, Chen Yiran, Li Hai. Learning structured sparsity in deep neural networks // Advances in Neural Information Processing Systems. 2016. 29.
[18] Zhang Xiang, Zhao Junbo, LeCun Yann. Character-level convolutional networks for text classification // Advances in Neural Information Processing Systems. 2015. 28.
[19] Zhou Aojun, Ma Yukun, Zhu Junnan, Liu Jianbo, Zhang Zhijie, Yuan Kun, Sun Wenxiu, Li Hongsheng. Learning N:M fine-grained structured sparse neural networks from scratch // arXiv preprint arXiv:2102.04010. 2021.
[20] Zhou Yangjie, Yang Mengtian, Guo Cong, Leng Jingwen, Liang Yun, Chen Quan, Guo Minyi, Zhu Yuhao. Characterizing and demystifying the implicit convolution algorithm on commercial matrix-multiplication accelerators // 2021 IEEE International Symposium on Workload Characterization (IISWC). 2021. 214-225.
[21] Zhu Maohua, Zhang Tao, Gu Zhenyu, Xie Yuan. Sparse tensor core: Algorithm and hardware co-design for vector-wise sparse neural networks on modern GPUs // Proceedings of the 52nd Annual IEEE/ACM International Symposium on Microarchitecture. 2019. 359-371.
1. For all authors...

(a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
(b) Did you describe the limitations of your work? [Yes]
(c) Did you discuss any potential negative societal impacts of your work? [No]
(d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]

2. If you are including theoretical results...

(a) Did you state the full set of assumptions of all theoretical results? [N/A]
(b) Did you include complete proofs of all theoretical results? [N/A]

3. If you ran experiments...

(a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
(b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
(c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [N/A]
(d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes]

4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...

(a) If your work uses existing assets, did you cite the creators? [N/A]
(b) Did you mention the license of the assets? [N/A]
(c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
(d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
(e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [N/A]

5. If you used crowdsourcing or conducted research with human subjects...

(a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
(b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
(c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:121e843beb4498b6e138f479a3d894911b986cc8499ad15074c676cdc32e9734
size 385029
acceleratingsparseconvolutionwithcolumnvectorwisesparsity/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2cde7d6aab986bc6f34c88f9991af77280486834d4c7d7c3c3951f869e018d04
size 320459
accelerationindistributedsparseregression/cc8497c6-8d71-4264-9b32-61dc9f402bc3_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:795fcbfbcfce27c1c25895d1f74d9aa929c4d99e1f74e0d0d79e9941a60097dc
size 89129
accelerationindistributedsparseregression/cc8497c6-8d71-4264-9b32-61dc9f402bc3_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:09723c6340a0f80b9d51dcaef8782fe284994bf40048300c4882d3a677fef3b9
size 109009