Add Batch 43e2b4c8-cd11-4703-bef8-79c88c536615
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete list.
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_content_list.json +3 -0
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_model.json +3 -0
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_origin.pdf +3 -0
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/full.md +404 -0
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/images.zip +3 -0
- acceleratecnnsfromthreedimensionsacomprehensivepruningframework/layout.json +3 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_content_list.json +3 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_model.json +3 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_origin.pdf +3 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/full.md +600 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/images.zip +3 -0
- acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/layout.json +3 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_content_list.json +3 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_model.json +3 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_origin.pdf +3 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/full.md +316 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/images.zip +3 -0
- acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/layout.json +3 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_content_list.json +3 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_model.json +3 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_origin.pdf +3 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/full.md +414 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/images.zip +3 -0
- acceleratinggossipsgdwithperiodicglobalaveraging/layout.json +3 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_content_list.json +3 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_model.json +3 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_origin.pdf +3 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/full.md +449 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/images.zip +3 -0
- acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/layout.json +3 -0
- accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_content_list.json +3 -0
- accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_model.json +3 -0
- accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_origin.pdf +3 -0
- accelerationviafractallearningrateschedules/full.md +595 -0
- accelerationviafractallearningrateschedules/images.zip +3 -0
- accelerationviafractallearningrateschedules/layout.json +3 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_content_list.json +3 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_model.json +3 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_origin.pdf +3 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/full.md +446 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/images.zip +3 -0
- accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/layout.json +3 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_content_list.json +3 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_model.json +3 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_origin.pdf +3 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/full.md +326 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/images.zip +3 -0
- accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/layout.json +3 -0
- accuracyonthelineonthestrongcorrelationbetweenoutofdistributionandindistributiongeneralization/c0c386ca-49b0-4e49-9828-2123bf346129_content_list.json +3 -0
- accuracyonthelineonthestrongcorrelationbetweenoutofdistributionandindistributiongeneralization/c0c386ca-49b0-4e49-9828-2123bf346129_model.json +3 -0
acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d636716be21fff42ff431f4edde7e5a83925ce3ec290a86191c1bb843830bf3
+size 92858

acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e945cff114bee8614763cc2d79fa16c0ab7d5babce01d4d5b19537574f8f0eda
+size 107575

acceleratecnnsfromthreedimensionsacomprehensivepruningframework/1bf3fb9c-a8b2-48ad-8720-9582d7b73939_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c0cd9e4c3f2c8ffc73d790ccdb5adbb03f4b51b4956c6c8d80dc3a6db7e629f
+size 3397906

acceleratecnnsfromthreedimensionsacomprehensivepruningframework/full.md
ADDED
@@ -0,0 +1,404 @@

# Accelerate CNNs from Three Dimensions: A Comprehensive Pruning Framework

Wenxiao Wang $^{1,2}$ Minghao Chen $^{1}$ Shuai Zhao $^{1}$ Long Chen $^{3,4}$ Jinming Hu $^{1}$ Haifeng Liu $^{1}$ Deng Cai $^{1}$ Xiaofei He $^{1}$ Wei Liu $^{2}$

# Abstract

Most neural network pruning methods, such as filter-level and layer-level prunings, prune the network model along one dimension (depth, width, or resolution) solely to meet a computational budget. However, such a pruning policy often leads to excessive reduction of that dimension, thus inducing a huge accuracy loss. To alleviate this issue, we argue that pruning should be conducted along three dimensions comprehensively. For this purpose, our pruning framework formulates pruning as an optimization problem. Specifically, it first casts the relationships between a certain model's accuracy and depth/width/resolution into a polynomial regression and then maximizes the polynomial to acquire the optimal values for the three dimensions. Finally, the model is pruned along the three optimal dimensions accordingly. In this framework, since collecting enough data for training the regression is very time-costly, we propose two approaches to lower the cost: 1) specializing the polynomial to ensure an accurate regression even with less training data; 2) employing iterative pruning and fine-tuning to collect the data faster. Extensive experiments show that our proposed algorithm surpasses state-of-the-art pruning algorithms and even neural architecture search-based algorithms.

# 1. Introduction

To deploy pre-trained Convolutional Neural Networks (CNNs) (Simonyan & Zisserman, 2015; He et al., 2016; Huang et al., 2017; Tan & Le, 2019) on resource-constrained mobile devices, plenty of methods (Ba & Caruana, 2014; Hinton et al., 2015; Liu et al., 2017; He et al., 2019; Frankle & Carbin, 2019) have been proposed for model acceleration. Among them, neural network pruning, which prunes redundant components (e.g., filters) of CNNs to cater for a computational budget, is one of the most popular and is the focus of this paper.

$^{1}$ State Key Lab of CAD&CG, Zhejiang University, China $^{2}$ Tencent Data Platform, China $^{3}$ Columbia University, US $^{4}$ Tencent, China. This work was done when Long Chen was at Tencent. Correspondence to: Deng Cai <dengcai@gmail.com>.

Proceedings of the $38^{th}$ International Conference on Machine Learning, PMLR 139, 2021. Copyright 2021 by the author(s).



Figure 1. Accuracies of a base neural network model with different pruning policies on TinyImageNet. The base model is ResNet-56 (FLOPs-reduction-ratio = 0). $\mathcal{X}$-only means that the model is pruned only along the $\mathcal{X}$ dimension, and "comprehensive" means that the three dimensions are pruned comprehensively. A larger FLOPs-reduction-ratio implies a higher acceleration ratio.

Currently, the dominant pruning methods fall into three categories: (1) layer-level pruning (Wang et al., 2019b; Lin et al., 2019), which prunes redundant layers and reduces the model's depth, (2) filter-level pruning (Liu et al., 2017; Li et al., 2017; Molchanov et al., 2017; He et al., 2019; Wang et al., 2019a; Luo et al., 2019; Kang & Han, 2020; Ye et al., 2020a), which prunes redundant filters and reduces the model's width, and (3) image-level pruning<sup>1</sup> (Howard et al., 2017; Tan & Le, 2019; Han et al., 2020), which resizes images and reduces the model's input resolution. These three kinds of pruning methods each focus on one single dimension (i.e., depth, width, or image resolution) that impacts a model's computational cost.

Naturally, we raise an important question overlooked by much previous work: given a pre-trained neural network model, which dimension (depth, width, or resolution) should we prune to minimize the model's accuracy loss? In practice, users empirically choose a redundant dimension, which, however, often leads to a sub-optimal pruned model because of an inappropriate dimension choice. Even worse, excessive pruning of whichever dimension is chosen will cause an unacceptable loss, as shown in Figure 1. Instead, comprehensively pruning these three dimensions yields a much lower loss than solely pruning any one dimension, as demonstrated in Figure 1, therefore enabling model acceleration with much better quality.

In this paper, we propose a framework that prunes three dimensions comprehensively. Instead of solely pruning one dimension to reduce the computational cost, our framework first decides how much of each dimension should be pruned. To this end, we formulate model acceleration as an optimization problem. Precisely, given a pre-trained neural network model and a target computational cost, assuming that the pruned model's depth, width, and resolution are $d \times 100\%$, $w \times 100\%$, and $r \times 100\%$ of the original model, respectively, we seek the optimal $(d, w, r)$ that maximizes the model's accuracy $a$:

$$
\max_{d, w, r} \ a := \mathcal{F}(d, w, r), \quad \text{s.t.} \ \ \mathcal{C}(d, w, r) = \tau, \tag{1}
$$

where $\mathcal{F}(d,w,r)$ is a Model Accuracy Predictor (MAP). $\mathcal{C}(d,w,r)$ and $\tau$ represent the model's computational cost and its constraint, respectively. Tan & Le (2019) designed a reasonable expression for $\mathcal{C}(d,w,r)$. However, designing a MAP manually is unachievable as its form can be arbitrarily complicated or even varies with the architecture (e.g., the MAPs for ResNet and MobileNet may be in different forms). Hence, we propose approximating the MAP via a polynomial regression, because polynomials can approximate arbitrary continuous functions according to Taylor's theorem. Specifically, we can formulate the MAP as a polynomial and collect a sufficient set of $(d,w,r,a)$ as training data to estimate its parameters. Then, problem (1) can be solved with Lagrange's multiplier theorem, and the model is eventually pruned in terms of the optimized $(d,w,r)$.

The main challenge that this framework encounters is that the polynomial regression requires tremendous training data (i.e., $\{(d, w, r, a)\}$), while the collection of the data is very costly because fetching each item of data, i.e., a $(d, w, r, a)$, means training a new neural network model from scratch. To reduce both the collection time and the model training cost, we improve the framework in two aspects: 1) A specialized polynomial is proposed whose weight tensor is replaced with its low-rank substitute. The low-rank weight tensor prevents the polynomial from overfitting and ensures an accurate regression even with limited training data. Further, as a bonus, the updated MAP has a more concise form.

2) Given a pre-trained model, we prune and fine-tune it iteratively to acquire a series of new models and their corresponding $\{(d,w,r,a)\}$, which is much faster than training such new models from scratch.

Extensive experiments are conducted to show the superiority of our proposed pruning algorithm over state-of-the-art pruning algorithms. Further, we compare against some algorithms that balance the size of the three dimensions (depth, width, and resolution) from a Neural Architecture Search (NAS) perspective. The comparative results also show our advantages over them.

It is worth highlighting that the contributions of this work are three-fold:

- We propose to prune a model along three dimensions comprehensively and determine the optimal values for these dimensions by solving a polynomial regression and subsequently an optimization problem.
- To complete the regression process with an acceptable cost, we apply two approaches: 1) specializing a MAP adapting to the scenario of limited training data; 2) using iterative pruning and fine-tuning to collect data faster.
- We conduct extensive experiments to validate that our proposed algorithm outperforms state-of-the-art pruning and even NAS-based model acceleration algorithms.

# 2. Background and Related Work

Neural Network Pruning: In the early stage, neural network pruning was done at the weight-level (Han et al., 2016; Frankle & Carbin, 2019; Sehwag et al., 2020; Ye et al., 2020b; Lee et al., 2019). However, it needs specific libraries for sparse matrix calculation (e.g., cuSPARSE) to accelerate the inference, while support for these libraries on mobile devices is restricted. Nowadays, the most dominant pruning methods are at the filter-level, layer-level, or image-level, directly reducing the computational cost for all devices. Filter-level pruning (Liu et al., 2017; Li et al., 2017; Molchanov et al., 2017; He et al., 2018; 2019; Wang et al., 2019a; Kang & Han, 2020; Ye et al., 2020a; Li et al., 2020; Wang et al., 2020) compresses models by removing unimportant filters in CNNs, layer-level pruning (Wang et al., 2019b) does that by pruning redundant layers, and image-level pruning (Howard et al., 2017) saves computation by using small input images. They have all achieved great success in pruning CNNs. However, focusing on pruning one dimension solely also restricts their potential.

Multi-Dimension Pruning: To the best of our knowledge, there are two methods (Wen et al., 2016; Lin et al., 2019) which prune models at both the filter- and layer-levels. Both of them train models with extra regularization terms and induce sparsity into the models. Then the filters or layers with high sparsity will be pruned with only a slight loss incurred. However, the same method cannot be used for balancing image size because images do not contain trainable parameters, and there is no way to induce sparsity into the images. In contrast, our proposed framework can balance three dimensions comprehensively, yielding better model acceleration results than the above-mentioned methods.



Figure 2. The pipeline of the proposed pruning framework. It first prunes a pre-trained model from three dimensions independently, yielding a set of $(d_n, w_n, r_n, a_n)$ that is taken as training data. Then, the training data is used to fit our specialized MAP $(\mathcal{F})$ via a polynomial regression. The optimal $(d^{\star}, w^{\star}, r^{\star})$ is then acquired by maximizing $\mathcal{F}$ subject to a computational cost constraint. Finally, the model will be pruned comprehensively in terms of $(d^{\star}, w^{\star}, r^{\star})$.

Pruning vs. NAS: Pruning and NAS (Pham et al., 2018; Gao et al., 2020; He et al., 2020; Tian et al., 2020; Liu et al., 2019; Tan & Le, 2019; Han et al., 2020; Howard et al., 2019; Huang et al., 2019; Zoph & Le, 2017) share the same goal, that is, maximizing a certain model's accuracy given a computational budget. However, their settings are very different: pruning shrinks the model from a pre-trained one, utilizing both the pre-trained model's architecture and weights, while NAS searches the model(s) from scratch. Therefore, though several algorithms (Tan & Le, 2019; Han et al., 2020) also attempt to balance the three dimensions (i.e., depth, width, and resolution) of the model from a NAS perspective, they cannot be applied to pruning directly.

# 3. Proposed Framework

# 3.1. Preliminaries

For a model $M$, we define $\mathcal{D}(M)$, $\mathcal{W}(M,l)$, and $\mathcal{R}(M)$ as its depth, width, and input resolution. Specifically, $\mathcal{D}(M)$ represents the number of blocks² that $M$ contains; $\mathcal{W}(M,l)$ denotes the number of filters of a certain layer $l$ in the model $M$; $\mathcal{R}(M)$ is the side length of $M$'s input image. Given a pre-trained model $M_0$, we also define $d_n$, $w_n$, and $r_n$ of a pruned model $M_n$ as:

$$
d_{n} = \frac{\mathcal{D}(M_{n})}{\mathcal{D}(M_{0})}, \quad w_{n} = \frac{\mathcal{W}(M_{n}, l)}{\mathcal{W}(M_{0}, l)}, \quad r_{n} = \frac{\mathcal{R}(M_{n})}{\mathcal{R}(M_{0})}. \tag{2}
$$

For filter pruning, following previous work (Luo et al., 2019; He et al., 2019; Lin et al., 2020), we prune all layers with the same ratio, so $w_{n}$ of a model is independent of the choice of layer $l$. Further, for a pruning task, it is easy to see that $d_{n}, w_{n}, r_{n} \in (0,1]$ and $d_{0} = w_{0} = r_{0} = 1$.

The pipeline of our proposed pruning framework is introduced in Figure 2. Unlike previous work that prunes one dimension solely, we first look for a pruning policy (i.e., how much of each dimension should be pruned) which aims to maximize the model's accuracy in Section 3.2. Then, we depict the process of pruning and fine-tuning a target model in terms of the pruning policy in Section 3.3.

# 3.2. Model Acceleration as Optimization

# 3.2.1. FORMULATION

Given a CNN architecture, the model's depth, width, and image resolution are three key aspects that affect both the model's accuracy and its computational cost. Thus, model acceleration can be formulated as the following problem:

$$
\begin{array}{c}
d^{\star}, w^{\star}, r^{\star} = \underset{d, w, r}{\arg\max} \ \mathcal{F}(d, w, r; \Theta) \\
\text{s.t.} \quad \mathcal{C}(d, w, r) = T \times \mathcal{C}(d_{0}, w_{0}, r_{0}),
\end{array} \tag{3}
$$

where $\mathcal{F}(d,w,r;\Theta)$ is a Model Accuracy Predictor (MAP), which predicts the model's accuracy given $(d,w,r)$.

$\Theta$ contains the parameters of the MAP. $\mathcal{C}(d,w,r)$ represents the computational cost (e.g., FLOPs) of a model. $T \in (0,1)$ implies that the pruned model's computational cost is $T$ times that of the original model. Problem (3) can be solved using Lagrange's multiplier theorem once $\mathcal{F}(d,w,r)$ and $\mathcal{C}(d,w,r)$ are known. Following (Tan & Le, 2019), in which a model's computational cost is proportional to $d$, $w^2$, and $r^2$, we re-define $\mathcal{C}(d,w,r)$ as:

$$
\mathcal{C}(d, w, r) = d w^{2} r^{2}. \tag{4}
$$

However, designing a MAP manually is unachievable as its form can be arbitrarily complicated, and different architectures may take different forms. An intuitive idea is resorting to a polynomial regression because any continuous function can be approximated with polynomials according to Taylor's theorem. Specifically, we can train $N$ models with different $(d, w, r)$, attaining their accuracy $a$, and fit the MAP with a polynomial by using $\{(d_n, w_n, r_n, a_n)\}_{n=1}^N$ as training data. However, an accurate regression requires hundreds of data items $(d_n, w_n, r_n, a_n)$, whereas fetching each item of that data requires training a new model from scratch, which is very resource-inefficient and time-consuming. To overcome this obstacle, on the one hand, we specialize a MAP that ensures an accurate regression even with less training data in Section 3.2.2. On the other hand, we expedite acquiring each data item $(d_n, w_n, r_n, a_n)$ by employing iterative pruning and fine-tuning in Section 3.2.3.

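To make the budget constraint concrete, the small sketch below evaluates the relative cost model of Eq. (4) and checks a candidate policy against the budget $T$. It is an illustration only, not code from the paper; `rel_cost` and `meets_budget` are names chosen here.

```python
def rel_cost(d, w, r):
    # Relative cost of Eq. (4): FLOPs scale linearly with depth and
    # quadratically with both width and input resolution.
    return d * w ** 2 * r ** 2

def meets_budget(d, w, r, T, tol=1e-6):
    # Equality constraint of problem (3): C(d, w, r) = T * C(1, 1, 1).
    return abs(rel_cost(d, w, r) - T) < tol

# Example: with T = 0.5, keeping the full depth and shrinking width and
# resolution to T**0.25 each already meets the budget.
print(meets_budget(1.0, 0.5 ** 0.25, 0.5 ** 0.25, T=0.5))  # True
```
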
# 3.2.2. SPECIALIZED MAP

The polynomial-shaped MAP can be represented as:

$$
\mathcal{F}(d, w, r; \Theta) = \sum_{i, j, k = 0}^{\mathcal{K}} \theta_{ijk} d^{i} w^{j} r^{k}, \tag{5}
$$

where $\Theta \in \mathbb{R}^{(\mathcal{K} + 1)\times (\mathcal{K} + 1)\times (\mathcal{K} + 1)}$ is a tensor, and all $\theta_{ijk}$ are its elements. Without any constraint on $\Theta$, the polynomial can be highly flexible and expressive. However, high flexibility also makes it easy to overfit (Bishop, 2006), especially when the training data (i.e., $\{(d,w,r,a)\}$) is scarce. To avoid overfitting and ensure an accurate regression with limited training data, a relatively simple MAP with less flexibility and expressiveness is needed. We achieve this by restricting the rank of $\Theta$ during the regression process, i.e., $\Theta$ in the MAP is replaced by its low-rank substitute. Formally, for $\Theta$ of rank $\widetilde{\mathcal{R}}$, its rank-$\mathcal{R}$ substitute $(\mathcal{R} < \widetilde{\mathcal{R}})$ and its elements are defined as (Kolda & Bader, 2009):

$$
\Theta \approx \sum_{q = 1}^{\mathcal{R}} \vec{s_{q}} \otimes \vec{u_{q}} \otimes \vec{v_{q}}, \quad \theta_{ijk} \approx \sum_{q = 1}^{\mathcal{R}} s_{qi} u_{qj} v_{qk}, \tag{6}
$$

in which $\otimes$ represents the outer product, and $\vec{s_q}, \vec{u_q}, \vec{v_q} \in \mathbb{R}^{\mathcal{K} + 1}$ are $(\mathcal{K} + 1)$-dimensional vectors, e.g., $\vec{s_q} = [s_{q0}, s_{q1}, \dots, s_{q\mathcal{K}}]^\top$. Then, replacing $\theta_{ijk}$ in Eq. (5) yields:

$$
\begin{aligned}
\mathcal{F}(d, w, r; \Theta) &\approx \sum_{i, j, k = 0}^{\mathcal{K}} \sum_{q = 1}^{\mathcal{R}} s_{qi} u_{qj} v_{qk} d^{i} w^{j} r^{k} \\
&= \sum_{q = 1}^{\mathcal{R}} \sum_{i, j, k = 0}^{\mathcal{K}} (s_{qi} d^{i}) (u_{qj} w^{j}) (v_{qk} r^{k}) \\
&= \sum_{q = 1}^{\mathcal{R}} \left( \sum_{i = 0}^{\mathcal{K}} s_{qi} d^{i} \right) \left( \sum_{j = 0}^{\mathcal{K}} u_{qj} w^{j} \right) \left( \sum_{k = 0}^{\mathcal{K}} v_{qk} r^{k} \right) \\
&= \sum_{q = 1}^{\mathcal{R}} \mathcal{H}(d; \vec{s_{q}}) \, \mathcal{H}(w; \vec{u_{q}}) \, \mathcal{H}(r; \vec{v_{q}}),
\end{aligned} \tag{7}
$$

in which $\mathcal{H}$ denotes a univariate polynomial. In practice, we take Eq. (7) as our MAP and control its flexibility by adjusting $\mathcal{R}$. A smaller $\mathcal{R}$ indicates a simpler MAP. Empirically, we find that $\mathcal{R} = 1$ is enough for achieving an accurate regression in most cases, which provides our MAP with a highly succinct form. We also verify through experiments that $\mathcal{R} = 1$ makes sense because it accords with the prior of the MAP (Section 4.3).

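To see how compact the specialized MAP is: for $\mathcal{K} = 3$, the unconstrained tensor $\Theta$ has $(\mathcal{K}+1)^3 = 64$ free coefficients, while the rank-1 form of Eq. (7) has only $3(\mathcal{K}+1) = 12$. The sketch below (our illustration, not the authors' released code; `map_predict` and `fit_map` are names assumed here) fits the rank-1 MAP to collected $(d, w, r, a)$ tuples with ordinary nonlinear least squares.

```python
import numpy as np
from scipy.optimize import least_squares

K = 3  # highest degree per variable; the paper uses K = 3 in its experiments

def map_predict(params, d, w, r):
    # Rank-1 MAP of Eq. (7): F(d, w, r) = H(d; s) * H(w; u) * H(r; v),
    # where params concatenates the three coefficient vectors s, u, v.
    s, u, v = np.split(np.asarray(params), 3)
    poly = np.polynomial.polynomial.polyval
    return poly(d, s) * poly(w, u) * poly(r, v)

def fit_map(train_data):
    """train_data: iterable of rows (d_n, w_n, r_n, a_n) collected by Algorithm 1."""
    d, w, r, a = np.asarray(train_data, dtype=float).T
    x0 = np.ones(3 * (K + 1))  # arbitrary initialization of the 12 coefficients
    res = least_squares(lambda p: map_predict(p, d, w, r) - a, x0)
    return res.x

# Usage sketch: predict the accuracy of an unseen (d, w, r) configuration.
# params = fit_map(collected_tuples)
# acc_hat = map_predict(params, 0.8, 0.9, 1.0)
```
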
# 3.2.3. FAST DATA COLLECTION

To collect the data used for the MAP's regression, instead of training many models with different $(d,w,r)$ from scratch, we apply iterative pruning and fine-tuning to acquire the data.

Iterative Pruning and Fine-tuning: As shown in Algorithm 1, the pre-trained model $M_0$ is pruned along three dimensions independently. For each dimension, we iteratively apply pruning and fine-tuning on $M_0$ to generate many models, and the configurations $\{(d_n, w_n, r_n, a_n)\}$ of these models are collected for the MAP's regression. $d_{min}$ in Algorithm 1 indicates that if we reduce the model's depth to $d_{min}$, the computational cost constraint $T$ can be fulfilled without pruning the model's width and input resolution. It is easy to deduce that the optimal $d^{\star} \geq d_{min}$. Likewise, $w_{min}$ and $r_{min}$ in Algorithm 1 are the minimal possible values for $w$ and $r$, respectively.

# Algorithm 1 Iterative Pruning and Fine-tuning

**input:** pre-trained $M_0$, rounds $rds$, pruning setting $T$

**initialize:** train_data $= \{(d_0, w_0, r_0, a_0)\}$

**function** PruneAlong(dimension, $x_0$, $x_{min}$):

- **for** $n = 1$ to $rds$ **do**
  - $x_{n} = x_{n-1} - \frac{x_{0} - x_{min}}{rds}$
  - prune $M_{n-1}$ along dimension to $x_{n} \to M_{n}$
  - fine-tune $M_{n} \to (d_{n}, w_{n}, r_{n}, a_{n})$
  - add $(d_n, w_n, r_n, a_n)$ to train_data
- **end for**

**end function**

$d_{min} = T d_{0}, \quad w_{min} = \sqrt{T}\, w_{0}, \quad r_{min} = \sqrt{T}\, r_{0}$

PruneAlong("depth", $d_0$, $d_{min}$); PruneAlong("width", $w_0$, $w_{min}$); PruneAlong("resolution", $r_0$, $r_{min}$)

**return** train_data

Compared with training models from scratch, our data collection strategy enjoys two advantages: 1) A pruned pre-trained model converges much faster than one trained from scratch, thus taking much less time to obtain a new model. 2) Besides the finally pruned model, iterative pruning yields several intermediate models as well as their configurations $\{(d_n, w_n, r_n, a_n)\}$, which can also be used for the MAP's regression.

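The loop below is a schematic Python rendering of Algorithm 1, included only to show the bookkeeping. `prune_along`, `finetune`, and `evaluate` stand for the pruning, fine-tuning, and evaluation routines of Sections 3.3 and 4.1; they are supplied by the caller and are not library functions.

```python
import copy

def collect_training_data(model0, T, prune_along, finetune, evaluate, rds=4):
    """Prune the pre-trained model along each dimension independently, fine-tune
    after every round, and record one (d, w, r, accuracy) tuple per model."""
    train_data = [(1.0, 1.0, 1.0, evaluate(model0))]
    # Smallest value each dimension can need on its own to reach the budget T:
    # depth scales FLOPs linearly, width and resolution quadratically (Eq. 4).
    limits = {"depth": T, "width": T ** 0.5, "resolution": T ** 0.5}
    for dim, x_min in limits.items():
        model, x = copy.deepcopy(model0), 1.0
        for _ in range(rds):
            x -= (1.0 - x_min) / rds            # x_n = x_{n-1} - (x_0 - x_min) / rds
            model = prune_along(model, dim, x)  # prune only this dimension to x
            finetune(model)                     # short fine-tuning, not training from scratch
            cfg = {"depth": 1.0, "width": 1.0, "resolution": 1.0, dim: x}
            train_data.append((cfg["depth"], cfg["width"], cfg["resolution"], evaluate(model)))
    return train_data
```
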
# 3.2.4. OPTIMIZING THE MAP

With the collected data, we fit the MAP by using a regression algorithm. Then, the optimal $(d^{\star},w^{\star},r^{\star})$ satisfies Eq. (8) according to Lagrange's multiplier theorem, where $\lambda$ is the Lagrange multiplier.

$$
\left\{ \begin{array}{l}
d w^{2} r^{2} - T \times d_{0} w_{0}^{2} r_{0}^{2} = 0 \\
\sum_{q = 1}^{\mathcal{R}} \mathcal{H}^{\prime}(d; \vec{s_{q}}) \mathcal{H}(w; \vec{u_{q}}) \mathcal{H}(r; \vec{v_{q}}) + \lambda w^{2} r^{2} = 0 \\
\sum_{q = 1}^{\mathcal{R}} \mathcal{H}(d; \vec{s_{q}}) \mathcal{H}^{\prime}(w; \vec{u_{q}}) \mathcal{H}(r; \vec{v_{q}}) + 2 \lambda d w r^{2} = 0 \\
\sum_{q = 1}^{\mathcal{R}} \mathcal{H}(d; \vec{s_{q}}) \mathcal{H}(w; \vec{u_{q}}) \mathcal{H}^{\prime}(r; \vec{v_{q}}) + 2 \lambda d w^{2} r = 0
\end{array} \right. \tag{8}
$$

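Eq. (8) is simply the stationarity condition of problem (3), so the same optimum can also be found numerically by maximizing the fitted MAP under the FLOPs equality constraint, e.g., with an off-the-shelf SLSQP solver. The sketch below is an illustration of that alternative route (not the paper's own procedure) and reuses the `map_predict`/`params` names assumed in the earlier fitting sketch.

```python
from scipy.optimize import minimize

def optimal_policy(map_predict, params, T):
    """Return (d*, w*, r*) maximizing the fitted MAP subject to d * w^2 * r^2 = T."""
    objective = lambda x: -map_predict(params, *x)            # maximize predicted accuracy
    budget = {"type": "eq", "fun": lambda x: x[0] * x[1] ** 2 * x[2] ** 2 - T}
    # d cannot fall below T, and w, r cannot fall below sqrt(T)
    # (Algorithm 1's d_min, w_min, r_min); all ratios are at most 1.
    bounds = [(T, 1.0), (T ** 0.5, 1.0), (T ** 0.5, 1.0)]
    x0 = [T ** 0.2, T ** 0.2, T ** 0.2]                       # a feasible starting point
    res = minimize(objective, x0, method="SLSQP", bounds=bounds, constraints=[budget])
    return tuple(res.x)

# Usage sketch: d_star, w_star, r_star = optimal_policy(map_predict, params, T=0.5)
```
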
# 3.3. Comprehensive Pruning and Fine-tuning

Leveraging the optimal $(d^{\star}, w^{\star}, r^{\star})$, filter-level pruning and layer-level pruning are applied to prune a pre-trained model $M_0$ to the target $d^{\star}$ and $w^{\star}$, and then the model is fine-tuned with images of size $r^{\star}$. During the entire pruning process, layer-pruning first and filter-pruning first are both viable and yield the same pruned model. Without loss of generality, we describe the pruning process by assuming layer-pruning first, and the concrete steps are as follows:

Pruning Layers: Following DBP (Wang et al., 2019b), we put a linear classifier after each layer of model $M_0$ and test its accuracy on the evaluation dataset. The accuracy of each linear classifier indicates the discrimination of its corresponding layer's features. Further, each layer's discrimination enhancement compared with its preceding layer is seen as the importance of the layer. With this importance metric, we pick out the least important $(1 - d^{\star} / d_{0}) \times 100\%$ layers and remove them from $M_0$, yielding $M_{p_1}$.

Pruning Filters: Filter-level pruning is performed over $M_{p_1}$. In particular, we use the scaling factor of BN layers as the importance metric, just like Slimming (Liu et al., 2017). However, different from Slimming, which compares the importance of all filters globally, we only compare the importance of filters in the same layer, and the least important $(1 - w^{\star} / w_{0})\times 100\%$ filters of each layer will be pruned. Through such a modification, the pruning ratios of all layers are kept the same. We denote the model after filter pruning by $M_{p_2}$.

Fine-tuning with Smaller Images: After pruning, the pruned model $M_{p_2}$ is fine-tuned with images of size $r^{\star}$. The images are resized by bilinear down-sampling, which is the most common down-sampling scheme for images. The model will be fine-tuned with a small learning rate until convergence, leading to the finally pruned model $M_p$.

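The following PyTorch sketch illustrates the per-layer filter selection described above: within every BatchNorm layer, channels are ranked by $|\gamma|$ and the least important $(1 - w^{\star})$ fraction is marked for removal. It is a minimal illustration, not the paper's implementation; physically removing the filters and rewiring the subsequent convolutions is architecture-specific and omitted.

```python
import torch
import torch.nn as nn

def filter_masks(model: nn.Module, w_star: float) -> dict:
    """Per-layer filter selection: keep the fraction w_star of channels with the
    largest BN scaling factors |gamma| in every BatchNorm2d layer."""
    masks = {}
    for name, module in model.named_modules():
        if isinstance(module, nn.BatchNorm2d):
            gamma = module.weight.detach().abs()              # BN scaling factors
            n_keep = max(1, int(round(w_star * gamma.numel())))
            keep = torch.zeros_like(gamma, dtype=torch.bool)
            keep[gamma.topk(n_keep).indices] = True           # True = keep this filter
            masks[name] = keep
    return masks

# Example (hypothetical model variable): keep 82% of the filters in every layer.
# masks = filter_masks(model, w_star=0.82)
```
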
# 4. Experiments

# 4.1. Experimental Settings

Datasets: We take three popular datasets as testbeds of our algorithm: CIFAR-10 (Krizhevsky et al., 2009), TinyImageNet (Wu et al., 2017), and ImageNet (Russakovsky et al., 2015). These three datasets differ in their image resolutions ($32 \times 32$ to $224 \times 224$), numbers of classes (10 to 1000), and dataset scales (50K to 1000K images). For all the datasets, images are augmented by symmetric padding, random cropping, and random horizontal flipping, all of which are common augmentation methods for these datasets (He et al., 2016; Howard et al., 2017; Wang et al., 2019a).

Architectures: We test our algorithm on three popular network architectures: ResNet (He et al., 2016), DenseNet (Huang et al., 2017), and EfficientNet (Tan & Le, 2019). Their basic blocks vary from residual blocks to densely connected blocks and NAS-searched blocks, representing three of the most popular designs for deep CNNs.

Evaluation Protocol: Following the conventions of previous work (Li et al., 2020; Lin et al., 2020; Ye et al., 2020a), we take the accuracy, parameters-reduction-ratio $(Prr)$, and FLOPs-reduction-ratio $(Frr)$ as the evaluation protocol of our model acceleration algorithm. $Prr$ and $Frr$ are defined as in Eq. (9), where $M_0$ and $M_p$ represent the base model and the pruned model, respectively.

$$
Prr = 1 - \frac{\mathrm{Params}(M_{p})}{\mathrm{Params}(M_{0})}, \quad Frr = 1 - \frac{\mathrm{FLOPs}(M_{p})}{\mathrm{FLOPs}(M_{0})}. \tag{9}
$$

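As a small utility, the ratios of Eq. (9) can be computed directly from parameter and FLOP counts; the helper below is illustrative only. Parameter counts can be read off a PyTorch model directly, while FLOPs require an external profiler and are passed in as plain numbers here.

```python
def reduction_ratios(params_base, params_pruned, flops_base, flops_pruned):
    # Eq. (9): fraction of parameters / FLOPs removed by pruning.
    prr = 1.0 - params_pruned / params_base
    frr = 1.0 - flops_pruned / flops_base
    return prr, frr

# Parameter counts, e.g.: n_params = sum(p.numel() for p in model.parameters())
print(reduction_ratios(850_000, 510_000, 126_000_000, 63_000_000))  # (0.4, 0.5)
```
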
Compared Algorithms: The compared algorithms fall into three categories: (1) algorithms solely pruning the model along one dimension (i.e., depth, width, or resolution), including $\mathcal{R}$-only (Howard et al., 2017), $\mathcal{W}$-only (Liu et al., 2017), FPGM (He et al., 2019), DBP (Wang et al., 2019b), PScratch (Wang et al., 2020), DHP (Li et al., 2020), and HRank (Lin et al., 2020); (2) algorithms that prune along multiple dimensions, such as GAL (Lin et al., 2019); (3) NAS-based algorithms, including EfficientNet (Tan & Le, 2019) and TinyNet (Han et al., 2020), which balance the size of the three dimensions from the NAS perspective.

Training Settings: For base models trained on CIFAR-10, we set the batch size to 64 for DenseNet and 128 for ResNet. Weight decay is set to $10^{-4}$. The models are trained for 160 epochs with the learning rate starting from 0.1 and divided by 10 at epochs 80 and 120. These are all the most common training settings (He et al., 2016; Howard et al., 2017; Wang et al., 2019a) for models trained on CIFAR-10. For ResNet and DenseNet trained on TinyImageNet and ImageNet, the batch size is set to 256, and weight decay is $10^{-4}$. Models are trained for 100 epochs. The learning rate is set to 0.1 at the beginning and is multiplied by 0.1 at epochs 30, 60, and 90. For EfficientNet, we apply the same training policy as (Han et al., 2020), which is also the most common for EfficientNet implemented with PyTorch (Paszke et al., 2017).

Regression and Pruning Settings: The MAP's hyperparameters are set to $\mathcal{R} = 1$ and $\mathcal{K} = 3$ in our pruning experiments. When collecting training data (i.e., $\{(d_n, w_n, r_n, a_n)\}_{n=1}^N$) for the polynomial regression, the model is pruned along each dimension four times (i.e., $rds = 4$ in Algorithm 1). ResNet and DenseNet trained on CIFAR-10 are fine-tuned for 40 epochs at each round, and for 80 epochs after comprehensive pruning. Therefore, the data collection process consumes as much time as training 3 models (training one model from scratch costs 160 epochs). Similarly, models trained on TinyImageNet and ImageNet are fine-tuned for 30 epochs at each round of the iterative pruning process. Thus, the data collection process takes about the same time as training 3.6 models. The finally pruned models trained on TinyImageNet and ImageNet are fine-tuned for 60 epochs after comprehensive pruning.

# 4.2. Results and Analyses

Results on CIFAR-10 and TinyImageNet: The experimental results on CIFAR-10 and TinyImageNet are shown in Table 1. As we can see, $\mathcal{W}$-only induces a greater loss than $\mathcal{D}$-only for ResNet-32 and ResNet-56, while for ResNet-101, the situation is the opposite. In other words, the importance of different dimensions depends on the original sizes of depth, width, and resolution, and we cannot deduce it from a simple prior, which further shows the necessity of our algorithm. We balance the size of the three dimensions dynamically and always achieve better results than pruning one or two dimensions. The most competitive opponent of our algorithm is DHP, which achieves similar accuracy and $Frr$ to our algorithm for ResNet-56 trained on both datasets. However, we show higher accuracy than DHP for DenseNet-40 on CIFAR-10 (94.54% vs. 93.94%), for ResNet-101 (65.27% vs. 64.82%) on TinyImageNet, and for DenseNet-100 (60.22% vs. 59.40%) on TinyImageNet with similar $Prr$ and $Frr$, which sheds light on the robustness of our algorithm across different architectures and datasets.

Results on ImageNet: Experiments with ImageNet are done on ResNet-50 and DenseNet-121. From Table 2, we can see that our algorithm achieves $0.45\%$ higher accuracy on ResNet-50 than the state-of-the-art algorithms (DHP and PScratch) with the same $Frr$. The improvement on DenseNet-121 is marginal compared with $\mathcal{W}$-only, because our algorithm also prunes DenseNet-121 mainly along the width dimension, which indicates that DenseNet-121's width is large and has much redundancy. By contrast, the input images do not need to be pruned. After a comprehensive consideration, our algorithm also concludes that we should mainly prune the filters of DenseNet-121 for acceleration. Therefore, it produces similar pruning results to filter-level pruning. However, the results do not imply that our algorithm is powerless. On the contrary, a pruning policy chosen with comprehensive consideration is never worse than an arbitrary one, though the two may sometimes produce similar results.

Comparison with NAS: Algorithms that balance the three dimensions (i.e., depth, width, and resolution) in a NAS manner are also compared, and the results are shown in Table 3. GPU-days is the most common metric for evaluating the search cost of NAS algorithms; it indicates the number of days the search would take when running on a single GPU. Both EfficientNet (Tan & Le, 2019) and TinyNet (Han et al., 2020) consume so many resources in searching for the optimal $(d^{\star}, w^{\star}, r^{\star})$ that we do not have enough GPUs to reproduce their search processes. Thus, the results of EfficientNet and TinyNet are both drawn from (Han et al., 2020), and their search costs are estimated through the number of models they trained. For example, training an EfficientNet for 300 epochs takes about 26 hours with $8 \times \mathrm{V}100$ GPUs, while TinyNet requires training 60 EfficientNet models from scratch. Hence, its search cost is about 520 GPU-days. In contrast, our algorithm spends only about $\frac{1}{25}$ as much time as TinyNet on searching but achieves similar accuracy.

# 4.3. Ablation Study

Rank of $\Theta$: In Section 3.2.2, our proposed MAP is

$$
\mathcal{F}(d, w, r; \Theta) = \sum_{q = 1}^{\mathcal{R}} \mathcal{H}(d; \vec{s_{q}}) \, \mathcal{H}(w; \vec{u_{q}}) \, \mathcal{H}(r; \vec{v_{q}}), \tag{10}
$$

where the rank of $\Theta$ is less than or equal to $\mathcal{R}$. Experimentally, we find that $\mathcal{R} = 1$ works well in most cases. To further explore this interesting phenomenon, ResNets with different $(d,w,r)$ are trained on CIFAR-10. The base model (i.e., $(d,w,r) = (1.0,1.0,1.0)$) is ResNet-32 with

Table 1. Pruning results on CIFAR-10 and TinyImageNet. $\mathcal{D}$, $\mathcal{W}$, and $\mathcal{R}$ indicate whether the model will be pruned along the depth, width, and resolution dimension, respectively. "Acc. Drop" means the accuracy loss induced by pruning (smaller is better). Results with $\dagger$ are drawn from the original papers, and the others are run with their published code with slight modifications. Our algorithm achieves smaller accuracy losses than the others with similar $Prr$ and $Frr$.

<table><tr><td>Dataset</td><td>Architecture</td><td>Algorithm</td><td>D</td><td>W</td><td>R</td><td>Baseline</td><td>Accuracy</td><td>Acc. Drop</td><td>Prr</td><td>Frr</td></tr><tr><td rowspan="23">CIFAR-10</td><td rowspan="7">ResNet-32</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>93.18%</td><td>90.19%</td><td>2.99%</td><td>-</td><td>0.52</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>93.18%</td><td>92.16%</td><td>1.02%</td><td>0.47</td><td>0.47</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>93.18%</td><td>92.65%</td><td>0.53%</td><td>0.28</td><td>0.48</td></tr><tr><td>GAL (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>93.18%</td><td>91.72%</td><td>1.46%</td><td>0.39</td><td>0.50</td></tr><tr><td>FPGM† (He et al., 2019)</td><td></td><td>✓</td><td></td><td>92.63%</td><td>92.31%</td><td>0.32%</td><td>-</td><td>0.42</td></tr><tr><td>PScratch (Wang et al., 2020)</td><td></td><td>✓</td><td></td><td>93.18%</td><td>92.18%</td><td>1.00%</td><td>-</td><td>0.50</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>93.18%</td><td>93.27%</td><td>-0.09%</td><td>0.38</td><td>0.49</td></tr><tr><td rowspan="9">ResNet-56</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>93.69%</td><td>92.00%</td><td>1.69%</td><td>-</td><td>0.51</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>93.69%</td><td>92.97%</td><td>0.72%</td><td>0.50</td><td>0.50</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>93.69%</td><td>93.27%</td><td>0.42%</td><td>0.40</td><td>0.52</td></tr><tr><td>GAL† (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>93.26%</td><td>93.38%</td><td>-0.12%</td><td>0.12</td><td>0.38</td></tr><tr><td>FPGM† (He et al., 2019)</td><td></td><td>✓</td><td></td><td>93.59%</td><td>93.26%</td><td>0.33%</td><td>-</td><td>0.52</td></tr><tr><td>PScratch† (Wang et al., 2020)</td><td></td><td>✓</td><td></td><td>93.23%</td><td>93.05%</td><td>0.18%</td><td>-</td><td>0.50</td></tr><tr><td>HRank† (Lin et al., 2020)</td><td></td><td>✓</td><td></td><td>93.26%</td><td>93.17%</td><td>0.09%</td><td>0.42</td><td>0.50</td></tr><tr><td>DHP (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>93.65%</td><td>93.58%</td><td>0.07%</td><td>0.42</td><td>0.49</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>93.69%</td><td>93.76%</td><td>-0.07%</td><td>0.40</td><td>0.50</td></tr><tr><td rowspan="7">DenseNet-40</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>94.59%</td><td>92.88%</td><td>1.71%</td><td>-</td><td>0.53</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>94.59%</td><td>94.26%</td><td>0.33%</td><td>0.65</td><td>0.65</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>94.59%</td><td>94.02%</td><td>0.57%</td><td>0.60</td><td>0.46</td></tr><tr><td>GAL† (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>94.81%</td><td>94.50%</td><td>0.31%</td><td>0.57</td><td>0.55</td></tr><tr><td>HRank† (Lin et al., 2020)</td><td></td><td>✓</td><td></td><td>94.81%</td><td>93.68%</td><td>1.13%</td><td>0.54</td><td>0.61</td></tr><tr><td>DHP† (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>94.74%</td><td>93.94%</td><td>0.80%</td><td>0.36</td><td>0.62</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>94.59%</td><td>94.54%</td><td>0.05%</td><td>0.66</td><td>0.66</td></tr><tr><td rowspan="18">TinyImageNet</td><td 
rowspan="6">ResNet-56</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>56.55%</td><td>54.64%</td><td>1.91%</td><td>-</td><td>0.49</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>56.55%</td><td>52.45%</td><td>4.10%</td><td>0.54</td><td>0.53</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>56.55%</td><td>55.57%</td><td>0.98%</td><td>0.25</td><td>0.53</td></tr><tr><td>GAL (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>56.55%</td><td>55.87%</td><td>0.68%</td><td>0.32</td><td>0.52</td></tr><tr><td>DHP (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>56.55%</td><td>55.82%</td><td>0.73%</td><td>0.46</td><td>0.55</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>56.55%</td><td>56.04%</td><td>0.51%</td><td>0.34</td><td>0.59</td></tr><tr><td rowspan="6">ResNet-101</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>64.83%</td><td>55.48%</td><td>9.35%</td><td>-</td><td>0.75</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>64.83%</td><td>63.47%</td><td>1.36%</td><td>0.75</td><td>0.75</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>64.83%</td><td>61.35%</td><td>3.48%</td><td>0.76</td><td>0.77</td></tr><tr><td>GAL (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>64.83%</td><td>64.33%</td><td>0.50%</td><td>0.45</td><td>0.76</td></tr><tr><td>DHP (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>64.83%</td><td>64.82%</td><td>0.01%</td><td>0.50</td><td>0.75</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>64.83%</td><td>65.27%</td><td>-0.44%</td><td>0.51</td><td>0.75</td></tr><tr><td rowspan="6">DenseNet-100</td><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>61.34%</td><td>56.97%</td><td>4.37%</td><td>-</td><td>0.75</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>61.34%</td><td>59.56%</td><td>1.78%</td><td>0.75</td><td>0.75</td></tr><tr><td>D-only DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>61.34%</td><td>58.44%</td><td>2.90%</td><td>0.65</td><td>0.78</td></tr><tr><td>GAL (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>61.34%</td><td>59.03%</td><td>2.31%</td><td>0.78</td><td>0.70</td></tr><tr><td>DHP (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>61.34%</td><td>59.40%</td><td>1.94%</td><td>0.73</td><td>0.73</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>61.34%</td><td>60.22%</td><td>1.12%</td><td>0.73</td><td>0.75</td></tr></table>

images of size 32, and the results are plotted in Figure 3. Observations from the first three sub-figures are shown in their titles. We can deduce from these observations:

$$
\mathcal{F}(d, w, r; \Theta) \approx \mathcal{H}(d; \vec{s_{q}}) \, \mathcal{H}(w; \vec{u_{q}}) \, \mathcal{H}(r; \vec{v_{q}}), \tag{11}
$$

which coincides with Eq. (10) once $\mathcal{R} = 1$. In other words, the three variables $(d,w,r)$ in the MAP can be approximately separated from each other. We also test the MAP with different $\mathcal{R}$, as shown in the fourth sub-figure of Figure 3. The MAP with larger $\mathcal{R}$ yields similar $(d^{\star},w^{\star},r^{\star})$ to that of $\mathcal{R} = 1$, which also indicates that $\mathcal{R} = 1$ is enough for obtaining a well-performing MAP.

Other Methods of Avoiding Overfitting: Besides restricting the rank of $\Theta$, we also try two extra methods of avoiding overfitting, i.e., decreasing the degree $\mathcal{K}$ of the polynomials and applying regression with regularization terms. The results are reported in Table 4. Specifically, a set of 13 items $(d_{n},w_{n},r_{n},a_{n})$ is used to fit the MAP, and another set of 80 items is used for evaluation. All data is collected with ResNet-56 trained on CIFAR-10. Training error and evaluation error are both reported. As we can see, normal polynomial regression induces severe overfitting and a high evaluation loss, and $\ell_2$-regularization has a limited effect on dealing with the overfitting issue. Still, lowering the degree of the polynomials is not a wise choice because it makes the polynomials fail to converge even on the training data. Instead, our specialized MAP shows a lower error rate on both the training data $(0.08\%)$ and the evaluation data $(0.25\%)$.

Table 2. Pruning results on ImageNet. The improvement on DenseNet-121 is marginal because of the properties of DenseNet-121. Detailed reasons are described in Section 4.2.

<table><tr><td>Algorithm</td><td>D</td><td>W</td><td>R</td><td>Accuracy</td><td>Prr</td><td>Frr</td></tr><tr><td colspan="7">ResNet-50 (76.15%)</td></tr><tr><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>71.56%</td><td>-</td><td>0.50</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>74.52%</td><td>0.50</td><td>0.50</td></tr><tr><td>DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>73.92%</td><td>0.56</td><td>0.50</td></tr><tr><td>GAL† (Lin et al., 2019)</td><td>✓</td><td>✓</td><td></td><td>71.95%</td><td>0.17</td><td>0.43</td></tr><tr><td>FPGM† (He et al., 2019)</td><td></td><td>✓</td><td></td><td>74.83%</td><td>-</td><td>0.54</td></tr><tr><td>PScratch† (Wang et al., 2020)</td><td></td><td>✓</td><td></td><td>75.45%</td><td>0.64</td><td>0.50</td></tr><tr><td>HRank† (Wang et al., 2020)</td><td></td><td>✓</td><td></td><td>74.98%</td><td>0.37</td><td>0.44</td></tr><tr><td>DHP (Li et al., 2020)</td><td></td><td>✓</td><td></td><td>75.45%</td><td>0.54</td><td>0.50</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>75.90%</td><td>0.53</td><td>0.50</td></tr><tr><td colspan="7">DenseNet-121 (75.01%)</td></tr><tr><td>R-only (Howard et al., 2017)</td><td></td><td></td><td>✓</td><td>73.07%</td><td>-</td><td>0.51</td></tr><tr><td>W-only (Liu et al., 2017)</td><td></td><td>✓</td><td></td><td>73.58%</td><td>0.51</td><td>0.51</td></tr><tr><td>DBP (Wang et al., 2019b)</td><td>✓</td><td></td><td></td><td>68.08%</td><td>0.66</td><td>0.37</td></tr><tr><td>Ours</td><td>✓</td><td>✓</td><td>✓</td><td>73.68%</td><td>0.48</td><td>0.51</td></tr></table>

Table 3. Comparison with NAS-based model acceleration algorithms. They all take EfficientNet-B0 as the baseline model. GPU-days is measured with NVIDIA V100. Note that training an EfficientNet from scratch costs about 8.7 GPU-days.

<table><tr><td>Algorithm</td><td>Params</td><td>FLOPs</td><td>Top-1/Top-5 Acc.</td><td>Search Cost (GPU days)</td></tr><tr><td>EfficientNet-B0</td><td>5.3M</td><td>387M</td><td>76.7%/93.2%</td><td>-</td></tr><tr><td>TinyNet-A†</td><td>5.1M</td><td>339M</td><td>76.8%/93.3%</td><td>~ 520</td></tr><tr><td>Ours</td><td>5.1M</td><td>314M</td><td>76.8%/93.3%</td><td>26</td></tr><tr><td>EfficientNet-B-1</td><td>3.6M</td><td>201M</td><td>74.7%/92.1%</td><td>-</td></tr><tr><td>TinyNet-B†</td><td>3.7M</td><td>202M</td><td>75.0%/92.2%</td><td>~ 520</td></tr><tr><td>Ours</td><td>3.6M</td><td>198M</td><td>75.2%/92.7%</td><td>24</td></tr><tr><td>EfficientNet-B-2</td><td>3.0M</td><td>98M</td><td>70.5%/89.5%</td><td>-</td></tr><tr><td>TinyNet-C†</td><td>2.5M</td><td>100M</td><td>71.2%/89.7%</td><td>~ 520</td></tr><tr><td>Ours</td><td>3.1M</td><td>98M</td><td>71.6%/89.9%</td><td>21</td></tr></table>
|
| 250 |
+
|
| 251 |
+
data. Instead, our specialized MAP shows a lower error rate on both training data $(0.08\%)$ and evaluation data $(0.25\%)$ .
|
| 252 |
+
|
| 253 |
+
Influence of Polynomials' Degree: Figure 4 shows the pruning results when adjusting the MAP's degree $\kappa$ . Especially, the polynomial regression degrades to linear regression when $\kappa = 1$ . It turns out that for polynomials with $\kappa \leq 2$ , the predicted optimal $(d^{\star}, w^{\star}, r^{\star})$ actually leads to a sub-optimal pruning policy, which indicates that the MAP is too simple to use. For polynomials with $\kappa \geq 3$ , all MAPs generate similar predictions about the optimal $(d, w, r)$ , i.e., (0.78, 0.82, 0.98) for ResNet-32 trained on CIFAR-10 and (0.65, 1.0, 0.63) for ResNet-56 trained on TinyImageNet. These results corroborate that our algorithm is relatively robust with respect to different degrees $\kappa$ , so practitioners do not need to choose the polynomial degree carefully.
|
| 254 |
+
|
| 255 |
+
# 4.4. Case Study
|
| 256 |
+
|
| 257 |
+
Visualization of Feature Maps for Different Pruning Policies: In order to further understand why pruning the
|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+

|
| 262 |
+
|
| 263 |
+

|
| 264 |
+
Figure 3. Accuracies of ResNets with different $(d, w, r)$ trained on CIFAR-10 (the first three sub-figures) and the predicted optimal $(d^{\star}, w^{\star}, r^{\star})$ with different $\mathcal{R}$ in Eq. (10).
|
| 265 |
+
|
| 266 |
+

|
| 267 |
+
|
| 268 |
+
Table 4. MAP's regression results with lower degree or regularization. $\kappa$ means the highest degree for each variable in $(d,w,r)$ . $\ell_2$ is the coefficient of the regularization term, and 0 indicates no regularization.
|
| 269 |
+
|
| 270 |
+
<table><tr><td>Type</td><td>K</td><td>\( \ell_2 \)</td><td>Train Err.</td><td>Eval Err.</td></tr><tr><td rowspan="7">Normal Polynomial</td><td>1</td><td>0</td><td>2.66%</td><td>2.97%</td></tr><tr><td>2</td><td>0</td><td>1.62%</td><td>2.25%</td></tr><tr><td>3</td><td>0</td><td>0.28%</td><td>1.28%</td></tr><tr><td>5</td><td>0</td><td>0.02%</td><td>2.31%</td></tr><tr><td>5</td><td>\( 10^{-3} \)</td><td>0.02%</td><td>2.28%</td></tr><tr><td>10</td><td>0</td><td>0.01%</td><td>2.58%</td></tr><tr><td>10</td><td>\( 10^{-3} \)</td><td>0.02%</td><td>2.32%</td></tr><tr><td rowspan="2">Ours</td><td>3</td><td>0</td><td>0.14%</td><td>0.33%</td></tr><tr><td>5</td><td>0</td><td>0.08%</td><td>0.25%</td></tr></table>
|
| 271 |
+
|
| 272 |
+

|
| 273 |
+
|
| 274 |
+

|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
|
| 278 |
+

|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
Figure 4. The predicted optimal $(d^{\star}, w^{\star}, r^{\star})$ and corresponding pruning results with different $\mathcal{K}$ . For all $\mathcal{K} \geq 3$ , the MAP's predicted optimal $(d^{\star}, w^{\star}, r^{\star})$ are very similar. Users do not need to bother to choose $\mathcal{K}$ carefully.
|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
|
| 285 |
+

|
| 286 |
+
|
| 287 |
+

|
| 288 |
+
|
| 289 |
+

|
| 290 |
+
|
| 291 |
+

|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
|
| 295 |
+

|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
|
| 299 |
+

|
| 300 |
+
|
| 301 |
+

|
| 302 |
+
|
| 303 |
+

|
| 304 |
+
|
| 305 |
+

|
| 306 |
+
|
| 307 |
+

|
| 308 |
+
|
| 309 |
+

|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
Figure 5. Visualization of last layer's feature maps from different models. The baseline model is a pre-trained ResNet-56. Four different pruning policies are tested, and our pruned model's feature maps look most like those of the baseline model.
|
| 315 |
+
|
| 316 |
+

|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
|
| 320 |
+

|
| 321 |
+
|
| 322 |
+

|
| 323 |
+
|
| 324 |
+

|
| 325 |
+
|
| 326 |
+

|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
|
| 330 |
+

|
| 331 |
+
|
| 332 |
+

|
| 333 |
+
|
| 334 |
+

|
| 335 |
+
|
| 336 |
+

|
| 337 |
+
|
| 338 |
+
Image
|
| 339 |
+
|
| 340 |
+
Baseline
|
| 341 |
+
|
| 342 |
+
Ours
|
| 343 |
+
|
| 344 |
+
$\mathcal{D} - \text{only}$ $\mathcal{W} - \text{only}$ $\mathcal{R} - \text{only}$
|
| 345 |
+
|
| 346 |
+
three dimensions simultaneously yields better results than pruning only one, Figure 5 compares the feature maps of models pruned with different policies. Specifically, all models are pruned from the same baseline model, a ResNet-56 pre-trained on CIFAR-10, and input images are randomly chosen from the Internet. For visualization, we extract the last convolutional layer's feature maps and compute their mean absolute values across channels. Figure 5 shows that the feature maps produced by our pruned model look the most similar to those of the original model. This finding indicates that our algorithm preserves most of the original model's information by pruning all three dimensions comprehensively.
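For readers who want to reproduce this kind of visualization, the PyTorch sketch below captures the output of a model's last convolutional layer with a forward hook and averages absolute activations over channels, as described above; `model` and `last_conv` are placeholder handles, not code from the paper.

```python
import torch

@torch.no_grad()
def mean_abs_feature_map(model, last_conv, image):
    """Channel-wise mean of absolute activations of `last_conv`
    for a single (1, 3, H, W) input image."""
    captured = {}

    def hook(_module, _inputs, output):
        captured["fmap"] = output.detach()

    handle = last_conv.register_forward_hook(hook)
    model.eval()
    model(image)          # forward pass; the hook stores the feature map
    handle.remove()
    # (1, C, h, w) -> (h, w): average absolute values across channels.
    return captured["fmap"].abs().mean(dim=1).squeeze(0)
```

Applying the same helper to the baseline model and to each pruned model on the same input gives the per-policy maps compared in Figure 5.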
|
| 347 |
+
|
| 348 |
+
# 5. Conclusion
|
| 349 |
+
|
| 350 |
+
In this paper, we proposed a novel pruning framework that prunes a pre-trained model along three dimensions, i.e., depth, width, and resolution, comprehensively. Remarkably, our framework can determine the optimal values for these three dimensions by modeling the relationship between a model's accuracy and its depth/width/resolution as a polynomial regression and then solving an optimization problem. Extensive experimental results demonstrate that the proposed pruning algorithm outperforms state-of-the-art pruning algorithms under a comparable computational budget. Compared with NAS-based methods, our pruned models are superior to the NAS-searched models at a much lower computational cost.
|
| 351 |
+
|
| 352 |
+
# Acknowledgements
|
| 353 |
+
|
| 354 |
+
This work was supported in part by the National Key Research and Development Program of China (Grant No. 2018AAA0101400), in part by the National Natural Science Foundation of China (Grant Nos: 62036009, U1909203,
|
| 355 |
+
|
| 356 |
+
61936006), and in part by the Innovation Capability Support Program of Shaanxi (Program No. 2021TD-05).
|
| 357 |
+
|
| 358 |
+
# References
|
| 359 |
+
|
| 360 |
+
Ba, J. and Caruana, R. Do deep nets really need to be deep? In NeurIPS, 2014.
|
| 361 |
+
Bishop, C. M. Pattern Recognition and Machine Learning. Springer, 2006.
|
| 362 |
+
Bourbaki, N. Elements of mathematics: Algebra. Springer, 2003.
|
| 363 |
+
Frankle, J. and Carbin, M. The lottery ticket hypothesis: Finding sparse, trainable neural networks. In ICLR, 2019.
|
| 364 |
+
Gao, Y., Bai, H., Jie, Z., Ma, J., Jia, K., and Liu, W. MTL-NAS: task-agnostic neural architecture search towards general-purpose multi-task learning. In CVPR, pp. 11540-11549. IEEE, 2020.
|
| 365 |
+
Han, K., Wang, Y., Zhang, Q., Zhang, W., Xu, C., and Zhang, T. Model Rubik's cube: Twisting resolution, depth and width for tinynets. NeurIPS, 2020.
|
| 366 |
+
Han, S., Mao, H., and Dally, W. J. Deep compression: Compressing deep neural network with pruning, trained quantization and huffman coding. In ICLR, 2016.
|
| 367 |
+
He, C., Ye, H., Shen, L., and Zhang, T. Milenas: Efficient neural architecture search via mixed-level reformulation. In CVPR, pp. 11990-11999. IEEE, 2020.
|
| 368 |
+
He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In CVPR, 2016.
|
| 369 |
+
He, Y., Kang, G., Dong, X., Fu, Y., and Yang, Y. Soft filter pruning for accelerating deep convolutional neural networks. In *IJCAI*, 2018.
|
| 370 |
+
He, Y., Liu, P., Wang, Z., Hu, Z., and Yang, Y. Filter pruning via geometric median for deep convolutional neural networks acceleration. In CVPR, 2019.
|
| 371 |
+
Hinton, G. E., Vinyals, O., and Dean, J. Distilling the knowledge in a neural network. CoRR, abs/1503.02531, 2015.
|
| 372 |
+
Howard, A., Pang, R., Adam, H., Le, Q. V., Sandler, M., Chen, B., Wang, W., Chen, L., Tan, M., Chu, G., Vasudevan, V., and Zhu, Y. Searching for mobilenetv3. In ICCV, pp. 1314-1324. IEEE, 2019.
|
| 373 |
+
Howard, A. G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., and Adam, H. Mobilenets: Efficient convolutional neural networks for mobile vision applications. CoRR, abs/1704.04861, 2017.
|
| 374 |
+
|
| 375 |
+
Huang, G., Liu, Z., van der Maaten, L., and Weinberger, K. Q. Densely connected convolutional networks. In CVPR, 2017.
|
| 376 |
+
Huang, Y., Cheng, Y., Bapna, A., Firat, O., Chen, D., Chen, M. X., Lee, H., Ngiam, J., Le, Q. V., Wu, Y., and Chen, Z. Gpipe: Efficient training of giant neural networks using pipeline parallelism. In Wallach, H. M., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E. B., and Garnett, R. (eds.), NeurIPS, 2019.
|
| 377 |
+
Kang, M. and Han, B. Operation-aware soft channel pruning using differentiable masks. In ICML, 2020.
|
| 378 |
+
Kolda, T. G. and Bader, B. W. Tensor decompositions and applications. Society for Industrial and Applied Mathematics Review, 2009.
|
| 379 |
+
Krizhevsky, A., Hinton, G., et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.
|
| 380 |
+
Lee, N., Ajanthan, T., and Torr, P. H. S. Snip: single-shot network pruning based on connection sensitivity. In ICLR, 2019.
|
| 381 |
+
Li, H., Kadav, A., Durdanovic, I., Samet, H., and Graf, H. P. Pruning filters for efficient convnets. In ICLR, 2017.
|
| 382 |
+
Li, Y., Gu, S., Zhang, K., Gool, L. V., and Timofte, R. DHP: differentiable meta pruning via hypernetworks. In ECCV, 2020.
|
| 383 |
+
Lin, M., Ji, R., Wang, Y., Zhang, Y., Zhang, B., Tian, Y., and Shao, L. Hrank: Filter pruning using high-rank feature map. In CVPR, 2020.
|
| 384 |
+
Lin, S., Ji, R., Yan, C., Zhang, B., Cao, L., Ye, Q., Huang, F., and Doermann, D. S. Towards optimal structured CNN pruning via generative adversarial learning. In CVPR, 2019.
|
| 385 |
+
Liu, H., Simonyan, K., and Yang, Y. DARTS: differentiable architecture search. In ICLR. OpenReview.net, 2019.
|
| 386 |
+
Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., and Zhang, C. Learning efficient convolutional networks through network slimming. In ICCV, 2017.
|
| 387 |
+
Luo, J., Zhang, H., Zhou, H., Xie, C., Wu, J., and Lin, W. Thinet: Pruning CNN filters for a thinner net. TPAMI, 2019.
|
| 388 |
+
Molchanov, P., Tyree, S., Karras, T., Aila, T., and Kautz, J. Pruning convolutional neural networks for resource efficient inference. In ICLR, 2017.
|
| 389 |
+
Paszke, A., Gross, S., Chintala, S., Chanan, G., Yang, E., DeVito, Z., Lin, Z., Desmaison, A., Antiga, L., and Lerer, A. Automatic differentiation in pytorch. 2017.
|
| 390 |
+
|
| 391 |
+
Pham, H., Guan, M. Y., Zoph, B., Le, Q. V., and Dean, J. Efficient neural architecture search via parameter sharing. In Dy, J. G. and Krause, A. (eds.), ICML, 2018.
|
| 392 |
+
Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M. S., Berg, A. C., and Li, F. Imagenet large scale visual recognition challenge. IJCV, 2015.
|
| 393 |
+
Sehwag, V., Wang, S., Mittal, P., and Jana, S. HYDRA: pruning adversarially robust neural networks. In Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., and Lin, H. (eds.), NeurIPS, 2020.
|
| 394 |
+
Simonyan, K. and Zisserman, A. Very deep convolutional networks for large-scale image recognition. In Bengio, Y. and LeCun, Y. (eds.), ICLR, 2015.
|
| 395 |
+
Tan, M. and Le, Q. V. Efficientnet: Rethinking model scaling for convolutional neural networks. In ICML, 2019.
|
| 396 |
+
Tian, Y., Shen, L., Shen, L., Su, G., Li, Z., and Liu, W. Alphagan: Fully differentiable architecture search for generative adversarial networks. CoRR, abs/2006.09134, 2020.
|
| 397 |
+
Wang, W., Fu, C., Guo, J., Cai, D., and He, X. COP: customized deep model compression via regularized correlation-based filter-level pruning. In *IJCAI*, 2019a.
|
| 398 |
+
Wang, W., Zhao, S., Chen, M., Hu, J., Cai, D., and Liu, H. DBP: discrimination based block-level pruning for deep model acceleration. CoRR, abs/1912.10178, 2019b.
|
| 399 |
+
Wang, Y., Zhang, X., Xie, L., Zhou, J., Su, H., Zhang, B., and Hu, X. Pruning from scratch. In AAAI, 2020.
|
| 400 |
+
Wen, W., Wu, C., Wang, Y., Chen, Y., and Li, H. Learning structured sparsity in deep neural networks. In NIPS, 2016.
|
| 401 |
+
Wu, J., Zhang, Q., and Xu, G. Tiny imagenet visual recognition challenge. Technical report, 2017.
|
| 402 |
+
Ye, M., Gong, C., Nie, L., Zhou, D., Klivans, A., and Liu, Q. Good subnetworks provably exist: Pruning via greedy forward selection. In ICML, 2020a.
|
| 403 |
+
Ye, X., Dai, P., Luo, J., Guo, X., Qi, Y., Yang, J., and Chen, Y. Accelerating CNN training by pruning activation gradients. In Vedaldi, A., Bischof, H., Brox, T., and Frahm, J. (eds.), ECCV, 2020b.
|
| 404 |
+
Zoph, B. and Le, Q. V. Neural architecture search with reinforcement learning. In ICLR, 2017.
|
acceleratecnnsfromthreedimensionsacomprehensivepruningframework/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d005356aba6a1b6b4f33b07cbe5248cf5ecbc88963be7aa4432f406c2041cff0
|
| 3 |
+
size 724264
|
acceleratecnnsfromthreedimensionsacomprehensivepruningframework/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6cfd3f9ab82d3f2e0c9b8cd1b4cc508b5e96789c45fd86ded44ffe63e54def79
|
| 3 |
+
size 545302
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:645852bf771c966ca3752ee12432ffef75b911e4ce02567a7d6543a4cc6c06bc
|
| 3 |
+
size 112443
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c2fc125aa52d63ec8a6ddd351aa08ff5ee6eb646bcebc907c244f564e8772758
|
| 3 |
+
size 142894
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/3de512a1-b47a-480c-b003-57ceeb2f54b6_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f3d5d3f5f52a62dea31d8c3b04e171de1e1522c10a5b4701c17fd502bb26a4fc
|
| 3 |
+
size 3423061
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/full.md
ADDED
|
@@ -0,0 +1,600 @@
|
| 1 |
+
# Accelerated Algorithms for Smooth Convex-Concave Minimax Problems with $\mathcal{O}(1 / k^2)$ Rate on Squared Gradient Norm
|
| 2 |
+
|
| 3 |
+
TaeHo Yoon<sup>1</sup> Ernest K. Ryu<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
In this work, we study the computational complexity of reducing the squared gradient magnitude for smooth minimax optimization problems. First, we present algorithms with accelerated $\mathcal{O}(1 / k^2)$ last-iterate rates, faster than the existing $\mathcal{O}(1 / k)$ or slower rates for extragradient, Popov, and gradient descent with anchoring. The acceleration mechanism combines extragradient steps with anchoring and is distinct from Nesterov's acceleration. We then establish optimality of the $\mathcal{O}(1 / k^2)$ rate through a matching lower bound.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Minimax optimization problems, or minimax games, of the form
|
| 12 |
+
|
| 13 |
+
$$
|
| 14 |
+
\underset {\mathbf {x} \in \mathbb {R} ^ {n}} {\text {minimize}} \; \underset {\mathbf {y} \in \mathbb {R} ^ {m}} {\text {maximize}} \; \mathbf {L} (\mathbf {x}, \mathbf {y}) \tag {1}
|
| 15 |
+
$$
|
| 16 |
+
|
| 17 |
+
have recently gained significant interest in the optimization and machine learning communities due to their application in adversarial training (Goodfellow et al., 2015; Madry et al., 2018) and generative adversarial networks (GANs) (Goodfellow et al., 2014).
|
| 18 |
+
|
| 19 |
+
Prior works on minimax optimization often consider compact domains $X, Y$ for $\mathbf{x}, \mathbf{y}$ and use the duality gap
|
| 20 |
+
|
| 21 |
+
$$
|
| 22 |
+
\operatorname {E r r} _ {\mathrm {g a p}} (\mathbf {x}, \mathbf {y}) := \sup _ {\tilde {\mathbf {y}} \in Y} \mathbf {L} (\mathbf {x}, \tilde {\mathbf {y}}) - \inf _ {\tilde {\mathbf {x}} \in X} \mathbf {L} (\tilde {\mathbf {x}}, \mathbf {y})
|
| 23 |
+
$$
|
| 24 |
+
|
| 25 |
+
to quantify suboptimality of algorithms' iterates in solving (1). However, while it is a natural analog of minimization error for minimax problems, the duality gap can be difficult to measure directly in practice, and it is unclear how to generalize the notion to non-convex-concave problems.
|
| 26 |
+
|
| 27 |
+
In contrast, the squared gradient magnitude $\| \nabla \mathbf{L}(\mathbf{x},\mathbf{y})\|^2$, when $\mathbf{L}$ is differentiable, is a more directly observable
|
| 28 |
+
|
| 29 |
+
value for quantifying suboptimality. Moreover, the notion is meaningful for differentiable non-convex-concave minimax games. Interestingly, very few prior works have analyzed convergence rates on the gradient norm for minimax problems, and the optimal convergence rate or corresponding algorithms were hitherto unknown.
|
| 30 |
+
|
| 31 |
+
Contributions. In this work, we introduce the extra anchored gradient (EAG) algorithms for smooth convex-concave minimax problems and establish an accelerated $\| \nabla \mathbf{L}(\mathbf{z}^k)\|^2 \leq \mathcal{O}(R^2 / k^2)$ rate, where $R$ is the Lipschitz constant of $\nabla \mathbf{L}$ . The rate improves upon the $\mathcal{O}(R^2 / k)$ rates of prior algorithms and is the first $\mathcal{O}(R^2 / k^2)$ rate in this setup. We then provide a matching $\Omega(R^2 / k^2)$ complexity lower bound for gradient-based algorithms and thereby establish optimality of EAG.
|
| 32 |
+
|
| 33 |
+
Beyond establishing the optimal complexity, our results provide the following observations. First, different suboptimality measures lead to materially different acceleration mechanisms, since reducing the duality gap is done optimally by the extragradient algorithm (Nemirovski, 2004; Nemirovsky, 1992). Also, since our optimal accelerated convergence rate is on the non-ergodic last iterate, neither averaging nor keeping track of the best iterate is necessary for optimally reducing the gradient magnitude in the deterministic setup.
|
| 34 |
+
|
| 35 |
+
# 1.1. Preliminaries and notation
|
| 36 |
+
|
| 37 |
+
We say a saddle function $\mathbf{L}\colon \mathbb{R}^n\times \mathbb{R}^m\to \mathbb{R}$ is convex-concave if $\mathbf{L}(\mathbf{x},\mathbf{y})$ is convex in $\mathbf{x}\in \mathbb{R}^n$ for all fixed $\mathbf{y}\in \mathbb{R}^{m}$ and $\mathbf{L}(\mathbf{x},\mathbf{y})$ is concave in $\mathbf{y}\in \mathbb{R}^{m}$ for all fixed $\mathbf{x}\in \mathbb{R}^n$ . We say $(\mathbf{x}^{\star},\mathbf{y}^{\star})$ is a saddle point of $\mathbf{L}$ if $\mathbf{L}(\mathbf{x}^{\star},\mathbf{y})\leq \mathbf{L}(\mathbf{x}^{\star},\mathbf{y}^{\star})\leq \mathbf{L}(\mathbf{x},\mathbf{y}^{\star})$ for all $\mathbf{x}\in \mathbb{R}^n$ and $\mathbf{y}\in \mathbb{R}^{m}$ . Solutions to the minimax problem (1) are defined to be saddle points of $\mathbf{L}$ . For notational conciseness, write $\mathbf{z} = (\mathbf{x},\mathbf{y})$ . When $\mathbf{L}$ is differentiable, define the saddle operator of $\mathbf{L}$ at $\mathbf{z} = (\mathbf{x},\mathbf{y})$ by
|
| 38 |
+
|
| 39 |
+
$$
|
| 40 |
+
\mathbf {G} _ {\mathbf {L}} (\mathbf {z}) = \left[ \begin{array}{c} \nabla_ {\mathbf {x}} \mathbf {L} (\mathbf {x}, \mathbf {y}) \\ - \nabla_ {\mathbf {y}} \mathbf {L} (\mathbf {x}, \mathbf {y}) \end{array} \right]. \tag {2}
|
| 41 |
+
$$
|
| 42 |
+
|
| 43 |
+
(When clear from the context, we drop the subscript $\mathbf{L}$ .) The saddle operator is monotone (Rockafellar, 1970), i.e.,
|
| 44 |
+
|
| 45 |
+
$\langle \mathbf{G}(\mathbf{z}_1) - \mathbf{G}(\mathbf{z}_2),\mathbf{z}_1 - \mathbf{z}_2\rangle \geq 0$ for all $\mathbf{z}_1,\mathbf{z}_2\in \mathbb{R}^n\times \mathbb{R}^m$ . We say $\mathbf{L}$ is $R$ -smooth if $\mathbf{G}_{\mathbf{L}}$ is $R$ -Lipschitz continuous. Note that $\nabla \mathbf{L}\neq \mathbf{G}_{\mathbf{L}}$ due to the sign change in the $\mathbf{y}$ gradient, but $\| \nabla \mathbf{L}\| = \| \mathbf{G}_{\mathbf{L}}\|$ , and we use the two forms interchangeably. Because $\mathbf{z}^{\star} = (\mathbf{x}^{\star},\mathbf{y}^{\star})$ is a saddle point of $\mathbf{L}$ if and only if $0 = \mathbf{G}_{\mathbf{L}}(\mathbf{z}^{\star})$ , the squared gradient magnitude is a natural measure of suboptimality at a given point for smooth convex-concave problems.
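As a small illustrative check (not part of the paper's development), the NumPy snippet below evaluates the saddle operator for the biaffine example $\mathbf{L}(\mathbf{x},\mathbf{y}) = \langle \mathbf{A}\mathbf{x} - \mathbf{b}, \mathbf{y} - \mathbf{c}\rangle$ that reappears in Section 3.1 and verifies the monotonicity inequality at random points; the helper name and random instance are ours.

```python
import numpy as np

def saddle_operator(A, b, c, x, y):
    """G_L(x, y) = (grad_x L, -grad_y L) for L(x, y) = <Ax - b, y - c>."""
    return np.concatenate([A.T @ (y - c), -(A @ x - b)])

rng = np.random.default_rng(0)
n = 5
A = rng.standard_normal((n, n))
b, c = rng.standard_normal(n), rng.standard_normal(n)
z1, z2 = rng.standard_normal(2 * n), rng.standard_normal(2 * n)
g1 = saddle_operator(A, b, c, z1[:n], z1[n:])
g2 = saddle_operator(A, b, c, z2[:n], z2[n:])
# Monotonicity: <G(z1) - G(z2), z1 - z2> >= 0; it is exactly 0 here because
# the linear part of this biaffine saddle operator is skew-symmetric.
print(np.dot(g1 - g2, z1 - z2))
```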
|
| 46 |
+
|
| 47 |
+
# 1.2. Prior work
|
| 48 |
+
|
| 49 |
+
Extragradient-type algorithms. The first main component of our proposed algorithm is the extragradient (EG) algorithm of Korpelevich (1977). EG and its variants, including the algorithm of Popov (1980), have been studied in the context of saddle point and variational inequality problems and have appeared in the mathematical programming literature (Solodov & Svaiter, 1999; Tseng, 2000; Noor, 2003; Censor et al., 2011; Lyashko et al., 2011; Malitsky & Semenov, 2014; Malitsky, 2015; 2020). More recently in the machine learning literature, similar ideas such as optimism (Chiang et al., 2012; Rakhlin & Sridharan, 2013a), prediction (Yadav et al., 2018), and negative momentum (Gidel et al., 2019; Zhang et al., 2020) have been presented and used in the context of multi-player games (Daskalakis et al., 2011; Rakhlin & Sridharan, 2013b; Syrgkanis et al., 2015; Antonakopoulos et al., 2021) and GANs (Gidel et al., 2018; Mertikopoulos et al., 2019; Liang & Stokes, 2019; Peng et al., 2020).
|
| 50 |
+
|
| 51 |
+
$\mathcal{O}(R / k)$ rates on duality gap. For minimax problems with an $R$-smooth $\mathbf{L}$ and bounded domains for $\mathbf{x}$ and $\mathbf{y}$, Nemirovski (2004) presented the mirror-prox algorithm generalizing EG and established ergodic $\mathcal{O}(R / k)$ convergence rates on $\mathrm{Err}_{\mathrm{gap}}$. Nesterov (2007); Monteiro & Svaiter (2010; 2011) extended the $\mathcal{O}(R / k)$ complexity analysis to the case of unbounded domains. Mokhtari et al. (2020b) showed that optimistic descent converges at an $\mathcal{O}(R / k)$ rate with respect to $\mathrm{Err}_{\mathrm{gap}}$. Since there exists an $\Omega(R / k)$ complexity lower bound on $\mathrm{Err}_{\mathrm{gap}}$ for black-box gradient-based minimax optimization algorithms (Nemirovsky, 1992; Nemirovski, 2004), these algorithms are order-optimal in terms of duality gap.
|
| 52 |
+
|
| 53 |
+
Convergence rates on squared gradient norm. Using standard arguments (e.g. (Solodov & Svaiter, 1999, Lemma 2.3)), one can show $\min_{i=0,\dots,k} \|\mathbf{G}(\mathbf{z}^i)\|^2 \leq \mathcal{O}(R^2/k)$ convergence rate of EG, provided that $\mathbf{L}$ is $R$ -smooth. Ryu et al. (2019) showed that optimistic descent algorithms also attain $\mathcal{O}(R^2/k)$ convergence in terms of the best iterate and proposed simultaneous gradient descent with anchoring, which pulls iterates toward the initial point $\mathbf{z}^0$ , and established $\mathcal{O}(R^2/k^{2-2p})$ convergence rates in terms of squared gradient norm of the last iterate (where $p > \frac{1}{2}$ is
|
| 54 |
+
|
| 55 |
+
an algorithm parameter; see Section A). Notably, anchoring resembles the Halpern iteration (Halpern, 1967; Lieder, 2020), which was used in Diakonikolas (2020) to develop a regularization-based algorithm with near-optimal (optimal up to logarithmic factors) complexity with respect to the gradient norm of the last iterate. Anchoring turns out to be the second main component of the acceleration; combining EG steps with anchoring, we obtain the optimal last-iterate convergence rate of $\mathcal{O}(R^2 /k^2)$ .
|
| 56 |
+
|
| 57 |
+
Structured minimax problems. For structured minimax problems of the form
|
| 58 |
+
|
| 59 |
+
$$
|
| 60 |
+
\mathbf {L} (\mathbf {x}, \mathbf {y}) = f (\mathbf {x}) + \langle \mathbf {A x}, \mathbf {y} \rangle - g (\mathbf {y}),
|
| 61 |
+
$$
|
| 62 |
+
|
| 63 |
+
where $f, g$ are convex and $\mathbf{A}$ is a linear operator, primal-dual splitting algorithms (Chambolle & Pock, 2011; Condat, 2013; Vū, 2013; Yan, 2018; Ryu & Yin, 2021) and Nesterov's smoothing technique (Nesterov, 2005a;b) have also been extensively studied (Chen et al., 2014; He & Monteiro, 2016). Notably, when $g$ is of "simple" form, Nesterov's smoothing framework achieves an accelerated rate $\mathcal{O}\left(\frac{\|\mathbf{A}\|}{k} +\frac{L_f}{k^2}\right)$ on duality gap. Additionally, Chambolle & Pock (2016) have shown that splitting algorithms can achieve $\mathcal{O}(1 / k^2)$ or linear convergence rates under appropriate strong convexity and smoothness assumptions on $f$ and $g$, although they rely on proximal operations. Kolossoski & Monteiro (2017); Hamedani & Aybat (2018); Zhao (2019); Alkousa et al. (2020) generalized these accelerated algorithms to the setting where the coupling term $\langle \mathbf{Ax},\mathbf{y}\rangle$ is replaced by a non-bilinear convex-concave function $\Phi (\mathbf{x},\mathbf{y})$.
|
| 64 |
+
|
| 65 |
+
Complexity lower bounds. Ouyang & Xu (2021) presented an $\Omega\left(\frac{\|\mathbf{A}\|}{k} + \frac{L_f}{k^2}\right)$ complexity lower bound on duality gap for gradient-based algorithms solving bilinear minimax problems with proximable $g$, establishing optimality of Nesterov's smoothing. Zhang et al. (2019) presented lower bounds for strongly-convex-strongly-concave problems. Golowich et al. (2020) proved that with the narrower class of 1-SCLI algorithms, which includes EG but not EAG, the squared gradient norm of the last iterate cannot be reduced beyond $\mathcal{O}(R^2 / k)$ in $R$-smooth minimax problems. These approaches are aligned with the information-based complexity analysis, introduced in (Nemirovsky & Yudin, 1983) and thoroughly studied in (Nemirovsky, 1991; 1992) for the special case of linear equations.
|
| 66 |
+
|
| 67 |
+
Other problem setups. Nesterov (2009) and Nedic & Ozdaglar (2009) proposed subgradient algorithms for nonsmooth minimax problems. Stochastic minimax and variational inequality problems were studied in (Nemirovski et al., 2009; Juditsky et al., 2011; Lan, 2012; Ghadimi & Lan, 2012; 2013; Chen et al., 2014; 2017; Hsieh et al., 2019). Strongly monotone variational inequality problems
|
| 68 |
+
|
| 69 |
+
or strongly-convex-strongly-concave minimax problems were studied in (Tseng, 1995; Nesterov & Scrimali, 2011; Gidel et al., 2018; Mokhtari et al., 2020a; Lin et al., 2020b; Wang & Li, 2020; Zhang et al., 2020; Azizian et al., 2020). Recently, minimax problems with objectives that are either strongly convex or nonconvex in one variable were studied in (Rafique et al., 2018; Thekumparampil et al., 2019; Jin et al., 2019; Nouiehed et al., 2019; Ostrovskii et al., 2020; Lin et al., 2020a;b; Lu et al., 2020; Wang & Li, 2020; Yang et al., 2020; Chen et al., 2021). Minimax optimization of composite objectives with smooth and nonsmooth-but-proximable convex-concave functions were studied in (Tseng, 2000; Csetnek et al., 2019; Malitsky & Tam, 2020; Bui & Combettes, 2021).
|
| 70 |
+
|
| 71 |
+
# 2. Accelerated algorithms: Extra anchored gradient
|
| 72 |
+
|
| 73 |
+
We now present two accelerated EAG algorithms that are qualitatively very similar but differ in the choice of step-sizes. The two algorithms present a tradeoff between the simplicity of the step-size and the simplicity of the convergence proof; one algorithm has a varying step-size but a simpler convergence proof, while the other algorithm has a simpler constant step-size but has a more complicated proof.
|
| 74 |
+
|
| 75 |
+
# 2.1. Description of the algorithms
|
| 76 |
+
|
| 77 |
+
The proposed extra anchored gradient (EAG) algorithms have the following general form:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\begin{array}{l} \mathbf {z} ^ {k + 1 / 2} = \mathbf {z} ^ {k} + \beta_ {k} \left(\mathbf {z} ^ {0} - \mathbf {z} ^ {k}\right) - \alpha_ {k} \mathbf {G} \left(\mathbf {z} ^ {k}\right) \\ \mathbf {z} ^ {k + 1} = \mathbf {z} ^ {k} + \beta_ {k} \left(\mathbf {z} ^ {0} - \mathbf {z} ^ {k}\right) - \alpha_ {k} \mathbf {G} \left(\mathbf {z} ^ {k + 1 / 2}\right) \tag {3} \\ \end{array}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
for $k \geq 0$ , where $\mathbf{z}^0 \in \mathbb{R}^n \times \mathbb{R}^m$ is the starting point. We use $\mathbf{G}$ defined in (2) rather than describing the $\mathbf{x}$ - and $\mathbf{y}$ -updates separately to keep the notation concise. We call $\alpha_k > 0$ step-sizes and $\beta_k \in [0,1)$ anchoring coefficients. Note that when $\beta_k = 0$ , EAG coincides with the unconstrained extragradient algorithm.
|
| 84 |
+
|
| 85 |
+
The simplest choice of $\{\alpha_k\}_{k\geq 0}$ is the constant one. Together with the choice $\beta_{k} = \frac{1}{k + 2}$ (which we clarify later), we get the following simpler algorithm.
|
| 86 |
+
|
| 87 |
+
# EAG with constant step-size (EAG-C)
|
| 88 |
+
|
| 89 |
+
$$
|
| 90 |
+
\begin{array}{l} \mathbf {z} ^ {k + 1 / 2} = \mathbf {z} ^ {k} + \frac {1}{k + 2} (\mathbf {z} ^ {0} - \mathbf {z} ^ {k}) - \alpha \mathbf {G} (\mathbf {z} ^ {k}) \\ \mathbf {z} ^ {k + 1} = \mathbf {z} ^ {k} + \frac {1}{k + 2} \left(\mathbf {z} ^ {0} - \mathbf {z} ^ {k}\right) - \alpha \mathbf {G} \left(\mathbf {z} ^ {k + 1 / 2}\right) \\ \end{array}
|
| 91 |
+
$$
|
| 92 |
+
|
| 93 |
+
where $\alpha > 0$ is fixed.
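As a quick illustration of the update rule, the NumPy sketch below runs EAG-C on a toy bilinear problem $\mathbf{L}(\mathbf{x},\mathbf{y}) = \langle \mathbf{A}\mathbf{x}, \mathbf{y}\rangle$ with the fixed step-size $\alpha = \frac{1}{8R}$ (the value used in Corollary 1 below); the problem instance, iteration count, and helper name are ours, not the paper's.

```python
import numpy as np

def eag_c(G, z0, R, num_iters, alpha=None):
    """Extra anchored gradient with constant step-size (EAG-C)."""
    alpha = 1.0 / (8.0 * R) if alpha is None else alpha
    z = z0.copy()
    for k in range(num_iters):
        beta = 1.0 / (k + 2)                          # anchoring coefficient
        z_half = z + beta * (z0 - z) - alpha * G(z)   # extrapolation step
        z = z + beta * (z0 - z) - alpha * G(z_half)   # anchored update
    return z

# Toy bilinear problem L(x, y) = <Ax, y>: G(x, y) = (A^T y, -A x), R = ||A||.
rng = np.random.default_rng(1)
n = 10
A = rng.standard_normal((n, n))
R = np.linalg.norm(A, 2)
G = lambda z: np.concatenate([A.T @ z[n:], -(A @ z[:n])])
z_k = eag_c(G, rng.standard_normal(2 * n), R, num_iters=500)
print(np.linalg.norm(G(z_k)) ** 2)   # squared gradient norm at the last iterate
```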
|
| 94 |
+
|
| 95 |
+
Theorem 1. Assume $\mathbf{L}\colon \mathbb{R}^n\times \mathbb{R}^m\to \mathbb{R}$ is an $R$ -smooth convex-concave function with a saddle point $\mathbf{z}^{\star}$ . Assume
|
| 96 |
+
|
| 97 |
+
$\alpha > 0$ satisfies
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
\begin{array}{l} 1 - 3 \alpha R - \alpha^ {2} R ^ {2} - \alpha^ {3} R ^ {3} \geq 0 \tag {4} \\ 1 - 8 \alpha R + \alpha^ {2} R ^ {2} - 2 \alpha^ {3} R ^ {3} \geq 0. \\ \end{array}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
Then EAG-C converges with rate
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\| \nabla \mathbf {L} (\mathbf {z} ^ {k}) \| ^ {2} \leq \frac {4 (1 + \alpha R + \alpha^ {2} R ^ {2})}{\alpha^ {2} (1 + \alpha R)} \frac {\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}}{(k + 1) ^ {2}}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
for $k\geq 0$
|
| 110 |
+
|
| 111 |
+
Corollary 1. In the setup of Theorem 1, $\alpha \in \left(0,\frac{1}{8R}\right]$ satisfies (4), and the particular choice $\alpha = \frac{1}{8R}$ yields
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\| \nabla \mathbf {L} (\mathbf {z} ^ {k}) \| ^ {2} \leq \frac {2 6 0 R ^ {2} \| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}}{(k + 1) ^ {2}}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
for $k\geq 0$
|
| 118 |
+
|
| 119 |
+
While EAG-C is simple in its form, its convergence proof (presented in the appendix) is complicated. Furthermore, the constant 260 in Corollary 1 seems large and raises the question of whether it could be reduced. These issues, to some extent, are addressed by the following alternative version of EAG.
|
| 120 |
+
|
| 121 |
+
# EAG with varying step-size (EAG-V)
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\begin{array}{l} \mathbf {z} ^ {k + 1 / 2} = \mathbf {z} ^ {k} + \frac {1}{k + 2} \left(\mathbf {z} ^ {0} - \mathbf {z} ^ {k}\right) - \alpha_ {k} \mathbf {G} \left(\mathbf {z} ^ {k}\right) \\ \mathbf {z} ^ {k + 1} = \mathbf {z} ^ {k} + \frac {1}{k + 2} \left(\mathbf {z} ^ {0} - \mathbf {z} ^ {k}\right) - \alpha_ {k} \mathbf {G} \left(\mathbf {z} ^ {k + 1 / 2}\right), \\ \end{array}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
where $\alpha_0\in \left(0,\frac{1}{R}\right)$ and
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\begin{array}{l} \alpha_ {k + 1} = \frac {\alpha_ {k}}{1 - \alpha_ {k} ^ {2} R ^ {2}} \left(1 - \frac {(k + 2) ^ {2}}{(k + 1) (k + 3)} \alpha_ {k} ^ {2} R ^ {2}\right) \\ = \alpha_ {k} \left(1 - \frac {1}{(k + 1) (k + 3)} \frac {\alpha_ {k} ^ {2} R ^ {2}}{1 - \alpha_ {k} ^ {2} R ^ {2}}\right) \tag {5} \\ \end{array}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
for $k\geq 0$
|
| 134 |
+
|
| 135 |
+
As the recurrence relation (5) may seem unfamiliar, we provide the following lemma describing the behavior of the resulting sequence.
|
| 136 |
+
|
| 137 |
+
Lemma 1. If $\alpha_0\in \left(0,\frac{3}{4R}\right)$ , then the sequence $\{\alpha_{k}\}_{k\geq 0}$ of (5) monotonically decreases to a positive limit. In particular, when $\alpha_0 = \frac{0.618}{R}$ , we have $\lim_{k\to \infty}\alpha_k\approx \frac{0.437}{R}$ .
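Lemma 1 is easy to verify numerically. The short script below (ours, not from the paper) iterates recurrence (5) starting from $\alpha_0 = 0.618 / R$ and prints the limiting step-size, which comes out close to $0.437 / R$.

```python
# Numerically iterate recurrence (5) for the EAG-V step-sizes (Lemma 1).
R = 1.0
alpha = 0.618 / R
for k in range(100000):
    ratio = (alpha * R) ** 2 / (1.0 - (alpha * R) ** 2)
    alpha = alpha * (1.0 - ratio / ((k + 1) * (k + 3)))
print(alpha)  # approximately 0.437 / R
```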
|
| 138 |
+
|
| 139 |
+
We now state the convergence results for EAG-V.
|
| 140 |
+
|
| 141 |
+
Theorem 2. Assume $\mathbf{L}\colon \mathbb{R}^n\times \mathbb{R}^m\to \mathbb{R}$ is an $R$ -smooth convex-concave function with a saddle point $\mathbf{z}^{\star}$ . Assume $\alpha_0\in \left(0,\frac{3}{4R}\right)$ , and define $\alpha_{\infty} = \lim_{k\to \infty}\alpha_{k}$ . Then EAG-V converges with rate
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\| \nabla \mathbf {L} (\mathbf {z} ^ {k}) \| ^ {2} \leq \frac {4 (1 + \alpha_ {0} \alpha_ {\infty} R ^ {2})}{\alpha_ {\infty} ^ {2}} \frac {\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}}{(k + 1) (k + 2)}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
for $k\geq 0$
|
| 148 |
+
|
| 149 |
+
Corollary 2. EAG-V with $\alpha_0 = \frac{0.618}{R}$ satisfies
|
| 150 |
+
|
| 151 |
+
$$
|
| 152 |
+
\| \nabla \mathbf {L} (\mathbf {z} ^ {k}) \| ^ {2} \leq \frac {2 7 R ^ {2} \| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}}{(k + 1) (k + 2)}
|
| 153 |
+
$$
|
| 154 |
+
|
| 155 |
+
for $k\geq 0$
|
| 156 |
+
|
| 157 |
+
# 2.2. Proof outline
|
| 158 |
+
|
| 159 |
+
We now outline the convergence analysis for EAG-V, whose proof is simpler than that of EAG-C. The key ingredient of the proof is a Lyapunov analysis with a nonincreasing Lyapunov function, the $V_{k}$ of the following lemma.
|
| 160 |
+
|
| 161 |
+
Lemma 2. Let $\{\beta_k\}_{k\geq 0}\subseteq (0,1)$ and $\alpha_0\in (0,\frac{1}{R})$ be given. Define the sequences $\{A_{k}\}_{k\geq 0},\{B_{k}\}_{k\geq 0}$ and $\{\alpha_{k}\}_{k\geq 0}$ by the recurrence relations
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
A _ {k} = \frac {\alpha_ {k}}{2 \beta_ {k}} B _ {k} \tag {6}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
B _ {k + 1} = \frac {B _ {k}}{1 - \beta_ {k}} \tag {7}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
$$
|
| 172 |
+
\alpha_ {k + 1} = \frac {\alpha_ {k} \beta_ {k + 1} \left(1 - \alpha_ {k} ^ {2} R ^ {2} - \beta_ {k} ^ {2}\right)}{\beta_ {k} \left(1 - \beta_ {k}\right) \left(1 - \alpha_ {k} ^ {2} R ^ {2}\right)} \tag {8}
|
| 173 |
+
$$
|
| 174 |
+
|
| 175 |
+
for $k \geq 0$ , where $B_0 = 1$ . Suppose that $\alpha_k \in (0, \frac{1}{R})$ holds for all $k \geq 0$ . Assume $\mathbf{L}$ is $R$ -smooth and convex-concave. Then the sequence $\{V_k\}_{k \geq 0}$ defined as
|
| 176 |
+
|
| 177 |
+
$$
|
| 178 |
+
V _ {k} := A _ {k} \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} + B _ {k} \langle \mathbf {G} \left(\mathbf {z} ^ {k}\right), \mathbf {z} ^ {k} - \mathbf {z} ^ {0} \rangle \tag {9}
|
| 179 |
+
$$
|
| 180 |
+
|
| 181 |
+
for EAG iterations in (3) is nonincreasing.
|
| 182 |
+
|
| 183 |
+
In Lemma 2, the choice of $\beta_{k} = \frac{1}{k + 2}$ leads to $B_{k} = k + 1$ , $A_{k} = \frac{\alpha_{k}(k + 2)(k + 1)}{2}$ , and (5). Why the Lyapunov function of Lemma 2 leads to the convergence guarantee of Theorem 2 may not be immediately obvious. The following proof provides the analysis.
|
| 184 |
+
|
| 185 |
+
Proof of Theorem 2. Let $\beta_{k} = \frac{1}{k + 2}$ as specified by the definition of EAG-V. By Lemma 2, the quantity $V_{k}$ defined by (9) is nonincreasing in $k$ . Therefore,
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
V _ {k} \leq \dots \leq V _ {0} = \alpha_ {0} \| \mathbf {G} (\mathbf {z} ^ {0}) \| ^ {2} \leq \alpha_ {0} R ^ {2} \| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}.
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
Next, we have
|
| 192 |
+
|
| 193 |
+
$$
|
| 194 |
+
\begin{array}{l} V _ {k} = A _ {k} \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} + B _ {k} \left\langle \mathbf {G} \left(\mathbf {z} ^ {k}\right), \mathbf {z} ^ {k} - \mathbf {z} ^ {0} \right\rangle \\ \stackrel {\mathrm {(a)}} {\geq} A _ {k} \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} + B _ {k} \left\langle \mathbf {G} \left(\mathbf {z} ^ {k}\right), \mathbf {z} ^ {\star} - \mathbf {z} ^ {0} \right\rangle \\ \stackrel {\mathrm {(b)}} {\geq} A _ {k} \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} - \frac {A _ {k}}{2} \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} - \frac {B _ {k} ^ {2}}{2 A _ {k}} \left\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \right\| ^ {2} \\ \stackrel {\mathrm {(c)}} {=} \frac {\alpha_ {k}}{4} (k + 1) (k + 2) \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} - \frac {k + 1}{\alpha_ {k} (k + 2)} \left\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \right\| ^ {2} \\ \stackrel {\mathrm {(d)}} {\geq} \frac {\alpha_ {\infty}}{4} (k + 1) (k + 2) \left\| \mathbf {G} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} - \frac {1}{\alpha_ {\infty}} \left\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \right\| ^ {2}, \\ \end{array}
|
| 195 |
+
$$
|
| 196 |
+
|
| 197 |
+
where (a) follows from the monotonicity inequality $\langle \mathbf{G}(\mathbf{z}^k),\mathbf{z}^k -\mathbf{z}^\star \rangle \geq 0$, (b) follows from Young's inequality, (c) follows from plugging in $A_{k} = \frac{\alpha_{k}(k + 1)(k + 2)}{2}$ and $B_{k} = k + 1$, and (d) follows from Lemma 1 $(\alpha_{k}\downarrow \alpha_{\infty})$. Reorganizing, we get
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\begin{array}{l} \frac {\alpha_ {\infty}}{4} (k + 1) (k + 2) \| \mathbf {G} (\mathbf {z} ^ {k}) \| ^ {2} \leq V _ {k} + \frac {1}{\alpha_ {\infty}} \| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2} \\ \leq \left(\alpha_ {0} R ^ {2} + \frac {1}{\alpha_ {\infty}}\right) \| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \| ^ {2}, \\ \end{array}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
and divide both sides by $\frac{\alpha_{\infty}}{4} (k + 1)(k + 2)$ to obtain the rate stated in the theorem. $\square$
|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+
# 2.3. Discussion of further generalizations
|
| 208 |
+
|
| 209 |
+
The algorithms and results of Sections 2.1 and 2.2 remain valid when we replace $\mathbf{G}$ with an $R$ -Lipschitz continuous monotone operator; neither the definition of the EAG algorithms nor any part of the proofs of Theorems 1 and 2 utilize properties of saddle functions beyond the monotonicity of their subdifferentials.
|
| 210 |
+
|
| 211 |
+
For EAG-C, the step-size conditions (4) in Theorem 1 can be relaxed to accommodate larger values of $\alpha$ . However, we do not pursue such generalizations to keep the already complicated and arduous analysis of EAG-C manageable. Also, larger step-sizes are more naturally allowed in EAG-V and Theorem 2. Finally, although (4) holds for values of $\alpha$ up to $\frac{0.1265}{R}$ , we present a slightly smaller range $\left(0, \frac{1}{8R}\right]$ in Corollary 1 for simplicity.
|
| 212 |
+
|
| 213 |
+
For EAG-V, the choice $\beta_{k} = \frac{1}{k + 2}$ was obtained by roughly, but not fully, optimizing the bound on EAG-V originating from Lemma 2. If one chooses $\beta_{k} = \frac{1}{k + \delta}$ with $\delta > 1$ , then (6) and (7) become
|
| 214 |
+
|
| 215 |
+
$$
|
| 216 |
+
A _ {k} = \frac {\alpha_ {k} (k + \delta) (k + \delta - 1)}{2 (\delta - 1)}, \quad B _ {k} = \frac {k + \delta - 1}{\delta - 1}.
|
| 217 |
+
$$
|
| 218 |
+
|
| 219 |
+
As the proof of Theorem 2 illustrates, linear growth of $B_{k}$ and quadratic growth of $A_{k}$ leads to $\mathcal{O}(1 / k^2)$ convergence of $\| \mathbf{G}(\mathbf{z}^{k})\|^{2}$ . The value $\alpha_0 = \frac{0.618}{R}$ in Lemma 1 and Corollary 2 was obtained by numerically minimizing the constant $\frac{4}{\alpha_{\infty}^2}\left(1 + \alpha_0\alpha_{\infty}R^2\right)$ in Theorem 2 in the case of $\delta = 2$ . The choice $\delta = 2$ , however, is not optimal. Indeed, the constant 27 of Corollary 2 can be reduced to 24.44 with $(\delta^{\star},\alpha_{0}^{\star})\approx (2.697,0.690 / R)$ , which was obtained by numerically optimizing over $\delta$ and $\alpha_0$ . Finally, there is a possibility that a choice of $\beta_{k}$ not in the form of $\beta_{k} = \frac{1}{k + \delta}$ leads to an improved constant.
|
| 220 |
+
|
| 221 |
+
In the end, we choose to present EAG-C and EAG-V with the simple choice $\beta_{k} = \frac{1}{k + 2}$ . As we establish in Section 3, the EAG algorithms are optimal up to a constant.
|
| 222 |
+
|
| 223 |
+
# 3. Optimality of EAG via a matching complexity lower bound
|
| 224 |
+
|
| 225 |
+
Upon seeing an accelerated algorithm, it is natural to ask whether the algorithm is optimal. In this section, we present an $\Omega(R^2 / k^2)$ complexity lower bound for the class of deterministic gradient-based algorithms for smooth convex-concave minimax problems. This result establishes that EAG is indeed optimal.
|
| 226 |
+
|
| 227 |
+
For the class of smooth minimax optimization problems, a deterministic algorithm $\mathcal{A}$ produces iterates $(\mathbf{x}^k,\mathbf{y}^k) = \mathbf{z}^k$ for $k\geq 1$ given a starting point $(\mathbf{x}^0,\mathbf{y}^0) = \mathbf{z}^0$ and a saddle function $\mathbf{L}$ , and we write $\mathbf{z}^k = \mathcal{A}(\mathbf{z}^0,\dots ,\mathbf{z}^{k - 1};\mathbf{L})$ for $k\geq 1$ . Define $\mathfrak{A}_{\mathrm{sim}}$ as the class of algorithms satisfying
|
| 228 |
+
|
| 229 |
+
$$
|
| 230 |
+
\mathbf {z} ^ {k} \in \mathbf {z} ^ {0} + \operatorname {span} \left\{\mathbf {G} _ {\mathbf {L}} \left(\mathbf {z} ^ {0}\right), \dots , \mathbf {G} _ {\mathbf {L}} \left(\mathbf {z} ^ {k - 1}\right) \right\}, \tag {10}
|
| 231 |
+
$$
|
| 232 |
+
|
| 233 |
+
and $\mathfrak{A}_{\mathrm{sep}}$ as the class of algorithms satisfying
|
| 234 |
+
|
| 235 |
+
$$
|
| 236 |
+
\begin{array}{l} \mathbf {x} ^ {k} \in \mathbf {x} ^ {0} + \operatorname {span} \left\{\nabla_ {\mathbf {x}} \mathbf {L} \left(\mathbf {x} ^ {0}, \mathbf {y} ^ {0}\right), \dots , \nabla_ {\mathbf {x}} \mathbf {L} \left(\mathbf {x} ^ {k - 1}, \mathbf {y} ^ {k - 1}\right) \right\} \\ \mathbf {y} ^ {k} \in \mathbf {y} ^ {0} + \operatorname {span} \left\{\nabla_ {\mathbf {y}} \mathbf {L} \left(\mathbf {x} ^ {0}, \mathbf {y} ^ {0}\right), \dots , \nabla_ {\mathbf {y}} \mathbf {L} \left(\mathbf {x} ^ {k - 1}, \mathbf {y} ^ {k - 1}\right) \right\}. \tag {11} \\ \end{array}
|
| 237 |
+
$$
|
| 238 |
+
|
| 239 |
+
To clarify, algorithms in $\mathfrak{A}_{\mathrm{sim}}$ access and utilize the x- and y-subgradients simultaneously. So $\mathfrak{A}_{\mathrm{sim}}$ contains simultaneous gradient descent, extragradient, Popov, and EAG (if we also count intermediate sequences $\mathbf{z}^{k + 1 / 2}$ as algorithms' iterates). On the other hand, algorithms in $\mathfrak{A}_{\mathrm{sep}}$ can access and utilize the x- and y-subgradients separately. So $\mathfrak{A}_{\mathrm{sim}} \subset \mathfrak{A}_{\mathrm{sep}}$ , and alternating gradient descent-ascent belongs to $\mathfrak{A}_{\mathrm{sep}}$ but not to $\mathfrak{A}_{\mathrm{sim}}$ .
|
| 240 |
+
|
| 241 |
+
In this section, we present a complexity lower bound that applies to all algorithms in $\mathfrak{A}_{\mathrm{sep}}$ , not just the algorithms in $\mathfrak{A}_{\mathrm{sim}}$ . Although EAG-C and EAG-V are in $\mathfrak{A}_{\mathrm{sim}}$ , we consider the broader class $\mathfrak{A}_{\mathrm{sep}}$ to rule out the possibility that separately updating the x- and y-variables provides an improvement beyond a constant factor.
|
| 242 |
+
|
| 243 |
+
We say $\mathbf{L}(\mathbf{x},\mathbf{y})$ is biaffine if it is an affine function of $\mathbf{x}$ for any fixed $\mathbf{y}$ and an affine function of $\mathbf{y}$ for any fixed $\mathbf{x}$ . Biaffine functions are, of course, convex-concave. We first establish a complexity lower bound on minimax optimization problems with biaffine loss functions.
|
| 244 |
+
|
| 245 |
+
Theorem 3. Let $k \geq 0$ be fixed. For any $n \geq k + 2$ , there exists an $R$ -smooth biaffine function $\mathbf{L}$ on $\mathbb{R}^n \times \mathbb{R}^n$ for which
|
| 246 |
+
|
| 247 |
+
$$
|
| 248 |
+
\left\| \nabla \mathbf {L} \left(\mathbf {z} ^ {k}\right) \right\| ^ {2} \geq \frac {R ^ {2} \left\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \right\| ^ {2}}{\left(2 \left\lfloor k / 2 \right\rfloor + 1\right) ^ {2}} \tag {12}
|
| 249 |
+
$$
|
| 250 |
+
|
| 251 |
+
holds for any algorithm in $\mathfrak{A}_{sep}$ , where $\lfloor \cdot \rfloor$ is the floor function and $\mathbf{z}^{\star}$ is the saddle point of $\mathbf{L}$ closest to $\mathbf{z}^{0}$ . Moreover, this lower bound is optimal in the sense that it cannot be improved with biaffine functions.
|
| 252 |
+
|
| 253 |
+
Since smooth biaffine functions are special cases of smooth convex-concave functions, Theorem 3 implies the optimality of EAG applied to smooth convex-concave minimax optimization problems.
|
| 254 |
+
|
| 255 |
+
Corollary 3. For $R$ -smooth convex-concave minimax problems, an algorithm in $\mathfrak{A}_{sep}$ cannot attain a worst-case convergence rate better than
|
| 256 |
+
|
| 257 |
+
$$
|
| 258 |
+
\frac {R ^ {2} \left\| \mathbf {z} ^ {0} - \mathbf {z} ^ {\star} \right\| ^ {2}}{(2 \left\lfloor k / 2 \right\rfloor + 1) ^ {2}}
|
| 259 |
+
$$
|
| 260 |
+
|
| 261 |
+
with respect to $\| \nabla \mathbf{L}(\mathbf{z}^k)\|^2$ . Since EAG-C and EAG-V have rates $\mathcal{O}(R^2\| \mathbf{z}^0 -\mathbf{z}^\star \| ^2 /k^2)$ , they are optimal, up to a constant factor, in $\mathfrak{A}_{sep}$ .
|
| 262 |
+
|
| 263 |
+
# 3.1. Outline of the worst-case biaffine construction
|
| 264 |
+
|
| 265 |
+
Consider biaffine functions of the form
|
| 266 |
+
|
| 267 |
+
$$
|
| 268 |
+
\mathbf {L} (\mathbf {x}, \mathbf {y}) = \langle \mathbf {A x} - \mathbf {b}, \mathbf {y} - \mathbf {c} \rangle ,
|
| 269 |
+
$$
|
| 270 |
+
|
| 271 |
+
where $\mathbf{A} \in \mathbb{R}^{n \times n}$ and $\mathbf{b}, \mathbf{c} \in \mathbb{R}^n$ . Then, $\nabla_{\mathbf{x}} \mathbf{L}(\mathbf{x}, \mathbf{y}) = \mathbf{A}^{\top} (\mathbf{y} - \mathbf{c})$ , $\nabla_{\mathbf{y}} \mathbf{L}(\mathbf{x}, \mathbf{y}) = \mathbf{A} \mathbf{x} - \mathbf{b}$ , $\mathbf{G}$ is $\| \mathbf{A} \|$ -Lipschitz, and solutions to
|
| 272 |
+
|
| 273 |
+
$$
|
| 274 |
+
\underset {\mathbf {x} \in X} {\text {minimize}} \; \underset {\mathbf {y} \in Y} {\text {maximize}} \; \left\langle \mathbf {A} \mathbf {x} - \mathbf {b}, \mathbf {y} - \mathbf {c} \right\rangle
|
| 275 |
+
$$
|
| 276 |
+
|
| 277 |
+
are characterized by $\mathbf{A}\mathbf{x} - \mathbf{b} = 0$ and $\mathbf{A}^{\intercal}(\mathbf{y} - \mathbf{c}) = 0$ .
|
| 278 |
+
|
| 279 |
+
Through translation, we may assume without loss of generality that $\mathbf{x}^0 = 0, \mathbf{y}^0 = 0$ . In this case, (11) becomes
|
| 280 |
+
|
| 281 |
+
$$
|
| 282 |
+
\begin{array}{l} \mathbf {x} ^ {k} \in \operatorname {span} \left\{\mathbf {A} ^ {\intercal} \mathbf {c}, \mathbf {A} ^ {\intercal} (\mathbf {A} \mathbf {A} ^ {\intercal}) \mathbf {c}, \dots , \mathbf {A} ^ {\intercal} (\mathbf {A} \mathbf {A} ^ {\intercal}) ^ {\lfloor \frac {k - 1}{2} \rfloor} \mathbf {c} \right\} \\ + \operatorname {span} \left\{\mathbf {A} ^ {\intercal} \mathbf {b}, \mathbf {A} ^ {\intercal} \left(\mathbf {A} \mathbf {A} ^ {\intercal}\right) \mathbf {b}, \dots , \mathbf {A} ^ {\intercal} \left(\mathbf {A} \mathbf {A} ^ {\intercal}\right) ^ {\lfloor \frac {k}{2} \rfloor - 1} \mathbf {b} \right\} \\ \end{array}
|
| 283 |
+
$$
|
| 284 |
+
|
| 285 |
+
$$
|
| 286 |
+
\begin{array}{l} \mathbf {y} ^ {k} \in \operatorname {span} \left\{\mathbf {b}, (\mathbf {A} \mathbf {A} ^ {\intercal}) \mathbf {b}, \dots , (\mathbf {A} \mathbf {A} ^ {\intercal}) ^ {\lfloor \frac {k - 1}{2} \rfloor} \mathbf {b} \right\} \\ + \operatorname {span} \left\{\mathbf {A} \mathbf {A} ^ {\intercal} \mathbf {c}, \dots , \left(\mathbf {A} \mathbf {A} ^ {\intercal}\right) ^ {\lfloor \frac {k}{2} \rfloor} \mathbf {c} \right\} \tag {13} \\ \end{array}
|
| 287 |
+
$$
|
| 288 |
+
|
| 289 |
+
for $k \geq 2$ . (We detail these arguments in the appendix.) Furthermore let $\mathbf{A} = \mathbf{A}^{\intercal}$ and $\mathbf{b} = \mathbf{A}^{\intercal}\mathbf{c} = \mathbf{A}\mathbf{c}$ . Then the characterization of $\mathfrak{A}_{\mathrm{sep}}$ further simplifies to
|
| 290 |
+
|
| 291 |
+
$$
|
| 292 |
+
\mathbf {x} ^ {k}, \mathbf {y} ^ {k} \in \mathcal {K} _ {k - 1} (\mathbf {A}; \mathbf {b}) := \operatorname {span} \{\mathbf {b}, \mathbf {A} \mathbf {b}, \mathbf {A} ^ {2} \mathbf {b}, \dots , \mathbf {A} ^ {k - 1} \mathbf {b} \}.
|
| 293 |
+
$$
|
| 294 |
+
|
| 295 |
+
Note that $\mathcal{K}_{k - 1}(\mathbf{A};\mathbf{b})$ is the order- $(k - 1)$ Krylov subspace.
|
| 296 |
+
|
| 297 |
+
Consider the following lemma. Its proof, deferred to the appendix, combines arguments from Nemirovsky (1991; 1992).
|
| 298 |
+
|
| 299 |
+
Lemma 3. Let $R > 0$ , $k \geq 0$ , and $n \geq k + 2$ . Then there exists $\mathbf{A} = \mathbf{A}^{\mathsf{T}} \in \mathbb{R}^{n \times n}$ such that $\| \mathbf{A} \| \leq R$ and $\mathbf{b} \in \mathcal{R}(\mathbf{A})$ , satisfying
|
| 300 |
+
|
| 301 |
+
$$
|
| 302 |
+
\left\| \mathbf {A} \mathbf {x} - \mathbf {b} \right\| ^ {2} \geq \frac {R ^ {2} \left\| \mathbf {x} ^ {\star} \right\| ^ {2}}{\left(2 \lfloor k / 2 \rfloor + 1\right) ^ {2}} \tag {14}
|
| 303 |
+
$$
|
| 304 |
+
|
| 305 |
+
for any $\mathbf{x} \in \mathcal{K}_{k-1}(\mathbf{A};\mathbf{b})$ , where $\mathbf{x}^{\star}$ is the minimum norm solution to the equation $\mathbf{Ax} = \mathbf{b}$ .
|
| 306 |
+
|
| 307 |
+
Take $\mathbf{A}$ and $\mathbf{b}$ as in Lemma 3 and $\mathbf{c} = \mathbf{x}^{\star}$ . Then $\mathbf{z}^{\star} = (\mathbf{x}^{\star},\mathbf{x}^{\star})$ is the saddle point of $\mathbf{L}(\mathbf{x},\mathbf{y}) = \langle \mathbf{Ax} - \mathbf{b},\mathbf{y} - \mathbf{c}\rangle$ with minimum norm. Finally,
|
| 308 |
+
|
| 309 |
+
$$
|
| 310 |
+
\begin{array}{l} \left\| \nabla \mathbf {L} (\mathbf {x} ^ {k}, \mathbf {y} ^ {k}) \right\| ^ {2} = \left\| \mathbf {A} ^ {\intercal} (\mathbf {y} ^ {k} - \mathbf {c}) \right\| ^ {2} + \left\| \mathbf {A} \mathbf {x} ^ {k} - \mathbf {b} \right\| ^ {2} \\ = \| \mathbf {A} \mathbf {y} ^ {k} - \mathbf {b} \| ^ {2} + \| \mathbf {A} \mathbf {x} ^ {k} - \mathbf {b} \| ^ {2} \\ \geq \frac {R ^ {2} \| \mathbf {x} ^ {\star} \| ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}} + \frac {R ^ {2} \| \mathbf {x} ^ {\star} \| ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}} \\ = \frac {R ^ {2} \| \mathbf {z} ^ {\star} - \mathbf {z} ^ {0} \| ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}}, \\ \end{array}
|
| 311 |
+
$$
|
| 312 |
+
|
| 313 |
+
for any $\mathbf{x}^k, \mathbf{y}^k \in \mathcal{K}_{k-1}(\mathbf{A}; \mathbf{b})$ . This completes the construction of the biaffine $\mathbf{L}$ of Theorem 3.
|
| 314 |
+
|
| 315 |
+
# 3.2. Optimal complexity lower bound
|
| 316 |
+
|
| 317 |
+
We now formalize the notion of complexity lower bounds. This formulation will allow us to precisely state and prove the second statement of Theorem 3 regarding the optimality of the lower bound.
|
| 318 |
+
|
| 319 |
+
Let $\mathcal{F}$ be a function class, $\mathcal{P}_{\mathcal{F}} = \{\mathcal{P}_f\}_{f\in \mathcal{F}}$ a class of optimization problems (with some common form), and $\mathcal{E}(\cdot ;\mathcal{P}_f)$ a suboptimality measure for the problem $\mathcal{P}_f$ . Define the worst-case complexity of an algorithm $\mathcal{A}$ for $\mathcal{P}_{\mathcal{F}}$ at the $k$ -th iteration given the initial condition $\| \mathbf{z}^0 -\mathbf{z}^\star \| \leq D$ , as
|
| 320 |
+
|
| 321 |
+
$$
|
| 322 |
+
\mathcal{C}\left(\mathcal{A};\mathcal{P}_{\mathcal{F}},D,k\right):= \sup_{\substack{\mathbf{z}^{0}\in B(\mathbf{z}^{\star};D)\\ f\in \mathcal{F}}}\mathcal{E}\left(\mathbf{z}^{k};\mathcal{P}_{f}\right),
|
| 323 |
+
$$
|
| 324 |
+
|
| 325 |
+
where $\mathbf{z}^j = \mathcal{A}(\mathbf{z}^0,\dots ,\mathbf{z}^{j - 1};f)$ for $j = 1,\ldots ,k$ and $B(\mathbf{z};D)$ denotes the closed ball of radius $D$ centered at $\mathbf{z}$ . The optimal complexity lower bound with respect to an algorithm class $\mathfrak{A}$ is
|
| 326 |
+
|
| 327 |
+
$$
|
| 328 |
+
\begin{array}{l} \mathcal {C} \left(\mathfrak {A}; \mathcal {P} _ {\mathcal {F}}, D, k\right) := \inf _ {\mathcal {A} \in \mathfrak {A}} \mathcal {C} \left(\mathcal {A}; \mathcal {P} _ {\mathcal {F}}, D, k\right) \\ = \inf_{\mathcal{A}\in \mathfrak{A}}\sup_{\substack{\mathbf{z}^{0}\in B(\mathbf{z}^{\star};D)\\ f\in \mathcal{F}}}\mathcal{E}\left(\mathbf{z}^{k};\mathcal{P}_{f}\right). \\ \end{array}
|
| 329 |
+
$$
|
| 330 |
+
|
| 331 |
+
A complexity lower bound is a lower bound on the optimal complexity lower bound.
|
| 332 |
+
|
| 333 |
+
Let $\mathcal{L}_R(\mathbb{R}^n\times \mathbb{R}^m)$ be the class of $R$ -smooth convex-concave functions on $\mathbb{R}^n\times \mathbb{R}^m$ , $\mathcal{P}_{\mathbf{L}}$ the minimax problem (1), and $\mathcal{E}(\mathbf{z};\mathcal{P}_{\mathbf{L}}) = \| \nabla \mathbf{L}(\mathbf{z})\| ^2$ . With this notation, the results of Section 2 can be expressed as
|
| 334 |
+
|
| 335 |
+
$$
|
| 336 |
+
\mathcal {C} \left(\operatorname {EAG}; \mathcal {P} _ {\mathcal {L} _ {R} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {m}\right)}, D, k\right) = \mathcal {O} \left(\frac {R ^ {2} D ^ {2}}{k ^ {2}}\right).
|
| 337 |
+
$$
|
| 338 |
+
|
| 339 |
+
Let $\mathcal{L}_R^{\mathrm{biaff}}(\mathbb{R}^n\times \mathbb{R}^m)$ be the class of $R$ -smooth biaffine functions on $\mathbb{R}^n\times \mathbb{R}^m$ . Then the first statement of Theorem 3, the existence of $\mathbf{L}$ , can be expressed as
|
| 340 |
+
|
| 341 |
+
$$
|
| 342 |
+
\mathcal {C} \left(\mathfrak {A} _ {\mathrm {sep}}; \mathcal {P} _ {\mathcal {L} _ {R} ^ {\mathrm {biaff}} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) \geq \frac {R ^ {2} D ^ {2}}{\left(2 \lfloor k / 2 \rfloor + 1\right) ^ {2}} \tag {15}
|
| 343 |
+
$$
|
| 344 |
+
|
| 345 |
+
for $n\geq k + 2$
|
| 346 |
+
|
| 347 |
+
As an aside, the argument of Corollary 3 can be expressed as: for any $\mathcal{A} \in \mathfrak{A}_{\mathrm{sep}}$ , we have
|
| 348 |
+
|
| 349 |
+
$$
|
| 350 |
+
\begin{array}{l} \mathcal {C} \left(\mathcal {A}; \mathcal {P} _ {\mathcal {L} _ {R} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) \geq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {sep}}; \mathcal {P} _ {\mathcal {L} _ {R} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) \\ \geq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {sep}}; \mathcal {P} _ {\mathcal {L} _ {R} ^ {\mathrm {biaff}} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) \\ \geq \frac {R ^ {2} D ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}}. \\ \end{array}
|
| 351 |
+
$$
|
| 352 |
+
|
| 353 |
+
The first inequality follows from $\mathcal{A} \in \mathfrak{A}_{\mathrm{sep}}$ , the second from $\mathcal{L}_R^{\mathrm{biaff}} \subset \mathcal{L}_R$ , and the third from Theorem 3.
|
| 354 |
+
|
| 355 |
+
Optimality of lower bound of Theorem 3. Using the above notation, our goal is to prove that for $n \geq k + 2$,
|
| 356 |
+
|
| 357 |
+
$$
|
| 358 |
+
\mathcal {C} \left(\mathfrak {A} _ {\mathrm {sep}}; \mathcal {P} _ {\mathcal {L} _ {R} ^ {\mathrm {biaff}} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) = \frac {R ^ {2} D ^ {2}}{\left(2 \lfloor k / 2 \rfloor + 1\right) ^ {2}}. \tag {16}
|
| 359 |
+
$$
|
| 360 |
+
|
| 361 |
+
We establish this claim with the chain of inequalities:
|
| 362 |
+
|
| 363 |
+
$$
|
| 364 |
+
\begin{array}{l} \frac {R ^ {2} D ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}} \leq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {sep}}; \mathcal {P} _ {\mathcal {L} _ {R} ^ {\mathrm {biaff}} (\mathbb {R} ^ {n} \times \mathbb {R} ^ {n})}, D, k\right) \quad (17) \\ \leq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {sim}}; \mathcal {P} _ {\mathcal {L} _ {R} ^ {\mathrm {biaff}} \left(\mathbb {R} ^ {n} \times \mathbb {R} ^ {n}\right)}, D, k\right) \quad (18) \\ \leq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {lin}}; \mathcal {P} _ {R, D} ^ {2 n, \mathrm {skew}}, k\right) \quad (19) \\ \leq \mathcal {C} \left(\mathfrak {A} _ {\mathrm {lin}}; \mathcal {P} _ {R, D} ^ {2 n}, k\right) \quad (20) \\ \leq \frac {R ^ {2} D ^ {2}}{(2 \lfloor k / 2 \rfloor + 1) ^ {2}}. \quad (21) \\ \end{array}
|
| 365 |
+
$$
|
| 366 |
+
|
| 367 |
+
Inequality (17) is what we established in Section 3.1. Inequality (18) follows from $\mathfrak{A}_{\mathrm{sim}} \subset \mathfrak{A}_{\mathrm{sep}}$ and the fact that the infimum over a larger class is smaller. Roughly speaking, the quantities in lines (19) and (20) are the complexity lower bounds for solving linear equations using only matrix-vector products, which were studied thoroughly in (Nemirovsky, 1991; 1992). We will show inequalities (19), (20), and (21) by establishing the connection of Nemirovsky's work with our setup of biaffine saddle problems. Once this is done, equality holds throughout and (16) is proved.
|
| 368 |
+
|
| 369 |
+
We first provide the definitions. Let $\mathcal{P}_{R,D}^{2n}$ be the collection of linear equations with $2n\times 2n$ matrices $\mathbf{B}$ satisfying $\| \mathbf{B}\| \leq R$ and $\mathbf{v} = \mathbf{B}\mathbf{z}^{\star}$ for some $\mathbf{z}^{\star}\in B(0;D)$ . Let $\mathcal{P}_{R,D}^{2n,\mathrm{skew}}\subset \mathcal{P}_{R,D}^{2n}$ be the subclass of equations with skew-symmetric $\mathbf{B}$ . Let $\mathfrak{A}_{\mathrm{lin}}$ be the class of iterative algorithms solving linear equations $\mathbf{B}\mathbf{z} = \mathbf{v}$ using only matrix multiplication by $\mathbf{B}$ and $\mathbf{B}^{\intercal}$ in the sense that
|
| 370 |
+
|
| 371 |
+
$$
|
| 372 |
+
\mathbf{z}^{k} \in \operatorname{span}\left\{\mathbf{v}^{0}, \dots, \mathbf{v}^{k}\right\}, \tag{22}
|
| 373 |
+
$$
|
| 374 |
+
|
| 375 |
+
where $\mathbf{v}^0 = 0$ , $\mathbf{v}^1 = \mathbf{v}$ , and for $k \geq 2$
|
| 376 |
+
|
| 377 |
+
$$
|
| 378 |
+
\mathbf{v}^{k} = \mathbf{B}\mathbf{v}^{j} \text{ or } \mathbf{B}^{\intercal}\mathbf{v}^{j} \text{ for some } j = 0, \dots, k - 1.
|
| 379 |
+
$$
|
| 380 |
+
|
| 381 |
+
The optimal complexity lower bound for a class of linear equation instances is defined as
|
| 382 |
+
|
| 383 |
+
$$
|
| 384 |
+
\mathcal{C}\left(\mathfrak{A}_{\mathrm{lin}};\mathcal{P}^{2n}_{R,D},k\right):= \inf_{\mathcal{A}\in \mathfrak{A}_{\mathrm{lin}}}\sup_{\substack{\| \mathbf{B}\| \leq R\\ \mathbf{v} = \mathbf{B}\mathbf{z}^{\star},\| \mathbf{z}^{\star}\| \leq D}}\left\| \mathbf{B}\mathbf{z}^{k} - \mathbf{v}\right\|^{2}.
|
| 385 |
+
$$
|
| 386 |
+
|
| 387 |
+
Define $\mathcal{C}\left(\mathfrak{A}_{\mathrm{lin}};\mathcal{P}_{R,D}^{2n,\mathrm{skew}},k\right)$ analogously.
|
| 388 |
+
|
| 389 |
+
Now we relate the optimal complexity lower bounds for biaffine minimax problems to those for linear equations. For $\mathbf{L}(\mathbf{x},\mathbf{y}) = \mathbf{b}^{\mathsf{T}}\mathbf{x} + \mathbf{x}^{\mathsf{T}}\mathbf{A}\mathbf{y} - \mathbf{c}^{\mathsf{T}}\mathbf{y}$ , we have
|
| 390 |
+
|
| 391 |
+
$$
|
| 392 |
+
\mathbf{G}_{\mathbf{L}}(\mathbf{x}, \mathbf{y}) = \left[\begin{array}{cc} \mathbf{O} & \mathbf{A} \\ -\mathbf{A}^{\intercal} & \mathbf{O} \end{array}\right] \left[\begin{array}{c} \mathbf{x} \\ \mathbf{y} \end{array}\right] + \left[\begin{array}{c} \mathbf{b} \\ \mathbf{c} \end{array}\right].
|
| 393 |
+
$$
|
| 394 |
+
|
| 395 |
+
Therefore, the minimax problem $\mathcal{P}_{\mathbf{L}}$ for $\mathbf{L} \in \mathcal{L}_R^{\mathrm{biaff}}(\mathbb{R}^n \times \mathbb{R}^n)$ is equivalent to solving the linear equation $\mathbf{B}\mathbf{z} = \mathbf{v}$ with $\mathbf{B} = \begin{bmatrix} \mathbf{O} & -\mathbf{A} \\ \mathbf{A}^\intercal & \mathbf{O} \end{bmatrix}$ and $\mathbf{v} = \begin{bmatrix} \mathbf{b} \\ \mathbf{c} \end{bmatrix} \in \mathbb{R}^{2n}$ , which belongs to $\mathcal{P}_{R,D}^{2n,\mathrm{skew}}$ with $D = \| z^{\star}\|$ .
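To make this identification concrete, the following NumPy sketch (our illustration, not code from the paper) builds $\mathbf{B}$ and $\mathbf{v}$ from a random biaffine instance $(\mathbf{A},\mathbf{b},\mathbf{c})$ and checks that $\mathbf{G}_{\mathbf{L}}(\mathbf{z}) = \mathbf{v} - \mathbf{B}\mathbf{z}$, so that $\|\mathbf{G}_{\mathbf{L}}(\mathbf{z})\| = \|\mathbf{B}\mathbf{z} - \mathbf{v}\|$ and $\mathbf{B}$ is skew-symmetric.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 5
A = rng.standard_normal((n, n))
b, c = rng.standard_normal(n), rng.standard_normal(n)

# Saddle operator of L(x, y) = b^T x + x^T A y - c^T y.
def G(z):
    x, y = z[:n], z[n:]
    return np.concatenate([b + A @ y, c - A.T @ x])

# Equivalent linear system B z = v with skew-symmetric B.
B = np.block([[np.zeros((n, n)), -A], [A.T, np.zeros((n, n))]])
v = np.concatenate([b, c])

z = rng.standard_normal(2 * n)
assert np.allclose(G(z), v - B @ z)   # G_L(z) = v - B z
assert np.allclose(B.T, -B)           # B is skew-symmetric
print(np.linalg.norm(G(z)), np.linalg.norm(B @ z - v))  # equal norms
```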
|
| 396 |
+
|
| 397 |
+
For both algorithm classes $\mathfrak{A}_{\mathrm{sim}}$ and $\mathfrak{A}_{\mathrm{lin}}$ , we may assume without loss of generality that $\mathbf{z}^0 = 0$ through translation. Then, the span condition (10) for $\mathfrak{A}_{\mathrm{sim}}$ becomes
|
| 398 |
+
|
| 399 |
+
$$
|
| 400 |
+
\mathbf {z} ^ {k} \in \mathcal {K} _ {k - 1} (\mathbf {B}; \mathbf {v}). \tag {23}
|
| 401 |
+
$$
|
| 402 |
+
|
| 403 |
+
Note that (22) reduces to (23) when $\mathbf{B}$ is skew-symmetric: multiplications by $\mathbf{B}^{\intercal} = -\mathbf{B}$ generate no new directions, so the span in (22) is exactly the Krylov subspace $\mathcal{K}_{k-1}(\mathbf{B};\mathbf{v})$ . Hence $\mathfrak{A}_{\mathrm{sim}}$ and $\mathfrak{A}_{\mathrm{lin}}$ are effectively the same class of algorithms under the identification $\mathcal{P}_{\mathcal{L}_R^{\mathrm{biaff}}(\mathbb{R}^n\times \mathbb{R}^n)}\subset \mathcal{P}_{R,D}^{2n,\mathrm{skew}}$ .
|
| 404 |
+
|
| 405 |
+
Since the supremum over a larger class of problems is larger, inequality (19) holds. Similarly, inequality (20) follows from $\mathcal{P}_{R,D}^{2n,\mathrm{skew}}\subset \mathcal{P}_{R,D}^{2n}$ .
|
| 406 |
+
|
| 407 |
+
Finally, (21) follows from the following lemma, using arguments based on Chebyshev-type matrix polynomials from Nemirovsky (1992). Its proof is deferred to the appendix.
|
| 408 |
+
|
| 409 |
+
Lemma 4. Let $R, D > 0$ and $k \geq 0$ . Then there exists $\mathcal{A} \in \mathfrak{A}_{\mathrm{lin}}$ such that for any $m \geq 1$ , $\mathbf{B} \in \mathbb{R}^{m \times m}$ , and $\mathbf{v} = \mathbf{B}\mathbf{z}^{\star}$ satisfying $\| \mathbf{B} \| \leq R$ and $\| \mathbf{z}^{\star} \| \leq D$ , the $\mathbf{z}^k$ -iterate produced by $\mathcal{A}$ satisfies
|
| 410 |
+
|
| 411 |
+
$$
|
| 412 |
+
\left\| \mathbf {B} \mathbf {z} ^ {k} - \mathbf {v} \right\| ^ {2} \leq \frac {R ^ {2} D ^ {2}}{\left(2 \left\lfloor k / 2 \right\rfloor + 1\right) ^ {2}}.
|
| 413 |
+
$$
|
| 414 |
+
|
| 415 |
+
# 3.3. Broader algorithm classes via resisting oracles
|
| 416 |
+
|
| 417 |
+
In (10) and (11), we assumed the subgradient queries are made within the span of the gradients at the previous iterates. This requirement (the linear span assumption) can be removed, i.e., a similar analysis can be done on general deterministic black-box gradient-based algorithms (formally defined in the appendix, Section C.5), using the resisting oracle technique (Nemirovsky & Yudin, 1983) at the cost of slightly enlarging the required problem dimension. We informally state the generalized result below and provide details in the appendix.
|
| 418 |
+
|
| 419 |
+
Theorem 4 (Informal). Let $n \geq 3k + 2$ . For any gradient-based deterministic algorithm, there exists an $R$ -smooth biaffine function $\mathbf{L}$ on $\mathbb{R}^n \times \mathbb{R}^n$ such that (12) holds.
|
| 420 |
+
|
| 421 |
+
Although we do not formally pursue this, the requirement that the algorithm is not randomized can also be removed using the techniques of Woodworth & Srebro (2016), which exploit near-orthogonality of random vectors in high dimensions.
|
| 422 |
+
|
| 423 |
+
# 3.4. Discussion
|
| 424 |
+
|
| 425 |
+
We established that one cannot improve the lower bound of Theorem 3 using biaffine functions, arguably the simplest family of convex-concave functions. Furthermore, this optimality statement holds for both algorithm classes $\mathfrak{A}_{\mathrm{sep}}$ and $\mathfrak{A}_{\mathrm{sim}}$ , as established through the chain of inequalities in Section 3.2. However, as demonstrated by Drori (2017), who introduced a non-quadratic lower bound for smooth convex minimization that improves upon the classical quadratic lower bounds of Nemirovsky (1992) and Nesterov (2013), a non-biaffine construction may improve the constant. In our setup, there is a gap of roughly a factor of 100 between the upper and lower bounds. (Note that each EAG iteration requires 2 evaluations of the saddle subdifferential oracle.) We suspect that both the algorithm and the lower bound can be improved, but we leave this to future work.
|
| 426 |
+
|
| 427 |
+
Golowich et al. (2020) establishes that for the class of 1-SCLI algorithms (S is for stationary), a subclass of $\mathfrak{A}_{\mathrm{sim}}$ for biaffine objectives, one cannot achieve a rate faster than $\| \nabla \mathbf{L}(\mathbf{z}^k)\|^2 \leq \mathcal{O}(1 / k)$ . This lower bound applies to EG but not EAG; EAG is not 1-SCLI, as its anchoring coefficients $\frac{1}{k + 2}$ vary over iterations, and its convergence rate breaks the 1-SCLI lower bound. On the other hand, we can view EAG as a non-stationary CLI algorithm (Arjevani & Shamir, 2016, Definition 2). We further discuss these connections in the appendix, Section E.
|
| 428 |
+
|
| 429 |
+
# 4. Experiments
|
| 430 |
+
|
| 431 |
+
We now present experiments illustrating the accelerated rate of EAG. We compare EAG-C and EAG-V against the prior algorithms with convergence guarantees: EG, Popov's algorithm (or optimistic descent) and simultaneous gradient descent with anchoring (SimGD-A). The precise forms of the algorithms are restated in the appendix.
|
| 432 |
+
|
| 433 |
+
Figure 1(a) presents experiments on our first example, constructed as follows. For $\epsilon >0$ , define
|
| 434 |
+
|
| 435 |
+
$$
|
| 436 |
+
f_{\epsilon}(u) = \left\{\begin{array}{ll} \epsilon |u| - \frac{1}{2}\epsilon^{2} & \text{if } |u| \geq \epsilon, \\ \frac{1}{2} u^{2} & \text{if } |u| < \epsilon. \end{array}\right.
|
| 437 |
+
$$
|
| 438 |
+
|
| 439 |
+
Next, for $0 < \epsilon \ll \delta \ll 1$ , define
|
| 440 |
+
|
| 441 |
+
$$
|
| 442 |
+
\mathbf {L} _ {\delta , \epsilon} (x, y) = (1 - \delta) f _ {\epsilon} (x) + \delta x y - (1 - \delta) f _ {\epsilon} (y), \tag {24}
|
| 443 |
+
$$
|
| 444 |
+
|
| 445 |
+

|
| 446 |
+
(a) Two-dimensional example $\mathbf{L}_{\delta ,\epsilon}$ of (24)
|
| 447 |
+
|
| 448 |
+

|
| 449 |
+
(b) Lagrangian of linearly constrained QP of (25)
|
| 450 |
+
|
| 451 |
+

|
| 452 |
+
Figure 1. Plots of $\| \mathbf{G}(\mathbf{z}^k)\|^2$ versus iteration count. Dashed lines indicate corresponding theoretical upper bounds.
|
| 453 |
+
(a) Discrete trajectories with $\mathbf{L}_{\delta ,\epsilon}$
|
| 454 |
+
Figure 2. Comparison of the discrete trajectories and their corresponding continuous-time flow. Trajectories from EAG-C and SimGD-A virtually coincide and resemble the anchored flow. However, SimGD-A progresses slower due to its diminishing step-sizes.
|
| 455 |
+
|
| 456 |
+

|
| 457 |
+
(b) Moreau-Yosida regularized flow with $\lambda = 0.01$ and the anchored flow with $\mathbf{L}(x,y) = xy$
|
| 458 |
+
|
| 459 |
+
where $x, y \in \mathbb{R}$ . Since $f_{\epsilon}$ is a 1-smooth convex function, $\mathbf{L}_{\delta, \epsilon}$ has smoothness parameter 1, which is almost tight due to the quadratic behavior of $\mathbf{L}_{\delta, \epsilon}$ within the region $|x|, |y| \leq \epsilon$ . This construction was inspired by Drori & Teboulle (2014), who presented $f_{\epsilon}$ as the worst-case instance for gradient descent. We choose the step-size $\alpha = 0.1$ as this value is comfortably within the theoretical range of convergent parameters for EG, EAG-C, and Popov. For EAG-V, we set $\alpha_0 = 0.1$ . We use $N = 10^5$ , $\delta = 10^{-2}$ , and $\epsilon = 5 \times 10^{-5}$ , and the initial point $\mathbf{z}^0$ has norm 1.
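As a rough illustration of this setup (ours, not the paper's experiment code), the sketch below implements $f_\epsilon$, the saddle operator of $\mathbf{L}_{\delta,\epsilon}$, and runs plain EG with the parameters quoted above; the EAG-C/EAG-V updates are restated in the appendix and are not reproduced here, and the initial point below is one arbitrary choice with norm 1.

```python
import numpy as np

delta, eps, alpha, N = 1e-2, 5e-5, 0.1, 10**5

def df(u):  # derivative of the Huber-like f_eps
    return eps * np.sign(u) if abs(u) >= eps else u

def G(z):   # saddle operator of L_{delta,eps}: (dL/dx, -dL/dy)
    x, y = z
    return np.array([(1 - delta) * df(x) + delta * y,
                     (1 - delta) * df(y) - delta * x])

z = np.array([1.0, 0.0])            # ||z^0|| = 1 (the text only fixes the norm)
for k in range(N):
    z_half = z - alpha * G(z)       # EG: extrapolation step
    z = z - alpha * G(z_half)       # EG: update step
print(np.linalg.norm(G(z)) ** 2)    # squared gradient norm after N iterations
```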
|
| 460 |
+
|
| 461 |
+
Figure 1(b) presents experiments on our second example
|
| 462 |
+
|
| 463 |
+
$$
|
| 464 |
+
\mathbf {L} (\mathbf {x}, \mathbf {y}) = \frac {1}{2} \mathbf {x} ^ {\mathrm {T}} \mathbf {H} \mathbf {x} - \mathbf {h} ^ {\mathrm {T}} \mathbf {x} - \langle \mathbf {A x} - \mathbf {b}, \mathbf {y} \rangle , \tag {25}
|
| 465 |
+
$$
|
| 466 |
+
|
| 467 |
+
where $\mathbf{x},\mathbf{y}\in \mathbb{R}^n$ , $\mathbf{A}\in \mathbb{R}^{n\times n}$ , $\mathbf{b}\in \mathbb{R}^n$ , $\mathbf{H}\in \mathbb{R}^{n\times n}$ is positive semidefinite, and $\mathbf{h}\in \mathbb{R}^n$ . Note that this is the Lagrangian of a linearly constrained quadratic minimization problem. We adopted this saddle function from Ouyang & Xu (2021), where the authors constructed $\mathbf{H},\mathbf{h},\mathbf{A}$ , and $\mathbf{b}$ to
|
| 468 |
+
|
| 469 |
+
provide a lower bound on the duality gap. The exact forms of $\mathbf{H}$ , $\mathbf{h}$ , $\mathbf{A}$ , and $\mathbf{b}$ are restated in the appendix. We use $n = 200$ , $N = 10^6$ , $\alpha = 0.5$ for EG and Popov, $\alpha = 0.1265$ for EAG-C, and $\alpha_0 = 0.618$ for EAG-V. Finally, we use the initial point $\mathbf{z}^0 = 0$ .
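For this second example, a minimal sketch of the saddle operator of (25) follows; the matrices below are random placeholders, whereas the experiments use the specific $\mathbf{H}$, $\mathbf{h}$, $\mathbf{A}$, $\mathbf{b}$ of Ouyang & Xu (2021) restated in the appendix.

```python
import numpy as np

rng = np.random.default_rng(0)
n = 200
M = rng.standard_normal((n, n))
H = M @ M.T / n                          # placeholder positive semidefinite H
h, b = rng.standard_normal(n), rng.standard_normal(n)
A = rng.standard_normal((n, n))          # placeholder constraint matrix

def G(z):  # saddle operator of L(x, y) = 0.5 x^T H x - h^T x - <Ax - b, y>
    x, y = z[:n], z[n:]
    return np.concatenate([H @ x - h - A.T @ y,   #  dL/dx
                           A @ x - b])            # -dL/dy
```

This `G` can then be plugged into the same EG loop as in the previous sketch.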
|
| 470 |
+
|
| 471 |
+
ODE Interpretation Figure 2(a) illustrates the algorithms applied to (24). For $|x|, |y| \gg \epsilon$ ,
|
| 472 |
+
|
| 473 |
+
$$
|
| 474 |
+
\mathbf {G} _ {\mathbf {L} _ {\delta , \epsilon}} (x, y) = \left[ \begin{array}{c} (1 - \delta) \epsilon + \delta y \\ (1 - \delta) \epsilon - \delta x \end{array} \right] \approx \delta \left[ \begin{array}{c} y \\ - x \end{array} \right],
|
| 475 |
+
$$
|
| 476 |
+
|
| 477 |
+
so the algorithms roughly behave as if the objective is the bilinear function $\delta xy$ . When $\delta$ is sufficiently small, trajectories of the algorithms closely resemble the corresponding continuous-time flows with $\mathbf{L}(x,y) = xy$ .
|
| 478 |
+
|
| 479 |
+
Csetnek et al. (2019) demonstrated that Popov's algorithm can be viewed as a discretization of the Moreau-Yosida regularized flow $\dot{\mathbf{z}}(t) = -\frac{\mathrm{Id} - (\mathrm{Id} + \lambda \mathbf{G})^{-1}}{\lambda} (\mathbf{z}(t))$ for some $\lambda > 0$ ,
|
| 480 |
+
|
| 481 |
+
and a similar analysis can be performed with EG. This connection explains why EG's trajectory in Figure 2(a) and the regularized flow depicted in Figure 2(b) are similar.
|
| 482 |
+
|
| 483 |
+
On the other hand, EAG and SimGD-A can be viewed as a discretization of the anchored flow ODE
|
| 484 |
+
|
| 485 |
+
$$
|
| 486 |
+
\dot {\mathbf {z}} (t) = - \mathbf {G} (\mathbf {z} (t)) + \frac {1}{t} (\mathbf {z} ^ {0} - \mathbf {z} (t)).
|
| 487 |
+
$$
|
| 488 |
+
|
| 489 |
+
The anchored flow depicted in Figure 2(b) approaches the solution much more quickly due to the anchoring term dampening the cycling behavior. The trajectories of EAG and SimGD-A iterates in Figure 2(a) are very similar to the anchored flow. However, SimGD-A requires diminishing step-sizes $\frac{1 - p}{(k + 1)^p}$ (both theoretically and experimentally) and therefore progresses much slower.
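The damping effect of anchoring is easy to reproduce numerically. The following forward-Euler sketch (our illustration; step size and horizon are arbitrary) integrates the plain flow $\dot{\mathbf{z}} = -\mathbf{G}(\mathbf{z})$ and the anchored flow for $\mathbf{L}(x,y) = xy$: the former keeps cycling around the solution, while the latter spirals in toward the saddle $(0,0)$.

```python
import numpy as np

G = lambda z: np.array([z[1], -z[0]])   # saddle operator of L(x, y) = xy
dt, z0 = 1e-3, np.array([1.0, 0.0])

z_plain, z_anch = z0.copy(), z0.copy()
for i in range(1, 20000):
    t = i * dt
    z_plain = z_plain + dt * (-G(z_plain))                    # cycles
    z_anch = z_anch + dt * (-G(z_anch) + (z0 - z_anch) / t)   # anchored flow
print(np.linalg.norm(z_plain), np.linalg.norm(z_anch))        # anchored norm is much smaller
```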
|
| 490 |
+
|
| 491 |
+
# 5. Conclusion
|
| 492 |
+
|
| 493 |
+
This work presents the extra anchored gradient (EAG) algorithms, which exhibit accelerated $\mathcal{O}(1 / k^2)$ rates on the squared gradient magnitude for smooth convex-concave minimax problems. The acceleration combines the extragradient and anchoring mechanisms, which separately achieve $\mathcal{O}(1 / k)$ or slower rates. We complement the $\mathcal{O}(1 / k^2)$ rate with a matching $\Omega(1 / k^2)$ complexity lower bound, thereby establishing optimality of EAG.
|
| 494 |
+
|
| 495 |
+
At a superficial level, the acceleration mechanism of EAG seems to be distinct from that of Nesterov; anchoring dampens oscillations, whereas momentum has the opposite, anti-damping effect. However, are the two acceleration phenomena entirely unrelated? Finding a common structure, a connection, between the two acceleration phenomena would be an interesting direction of future work.
|
| 496 |
+
|
| 497 |
+
# Acknowledgements
|
| 498 |
+
|
| 499 |
+
TY and EKR were supported by the National Research Foundation of Korea (NRF) Grant funded by the Korean Government (MSIP) [No. 2020R1F1A1A01072877], the National Research Foundation of Korea (NRF) Grant funded by the Korean Government (MSIP) [No. 2017R1A5A1015626], by the New Faculty Startup Fund from Seoul National University, and by the AI Institute of Seoul National University (AIIS) through its AI Frontier Research Grant (No. 0670-20200015) in 2020. We thank Jaewook Suh and Jongmin Lee for reviewing the manuscript and providing valuable feedback. We thank Jelena Diakonikolas for the discussion on the prior work on parameter-free near-optimal methods for the smooth minimax setup. Finally, we thank the anonymous referees for bringing to our attention the recent complexity lower bound on the class of 1-SCLI algorithms by Golowich et al. (2020).
|
| 500 |
+
|
| 501 |
+
# References
|
| 502 |
+
|
| 503 |
+
Alkousa, M., Gasnikov, A., Dvinskikh, D., Kovalev, D., and Stonyakin, F. Accelerated methods for saddle-point problem. Computational Mathematics and Mathematical Physics, 60(11):1787-1809, 2020.
|
| 504 |
+
Antonakopoulos, K., Belmega, E. V., and Mertikopoulos, P. Adaptive extra-gradient methods for min-max optimization and games. *ICLR*, 2021.
|
| 505 |
+
Arjevani, Y. and Shamir, O. On the iteration complexity of oblivious first-order optimization algorithms. ICML, 2016.
|
| 506 |
+
Arjevani, Y., Shalev-Shwartz, S., and Shamir, O. On lower and upper bounds in smooth and strongly convex optimization. Journal of Machine Learning Research, 17(1): 4303-4353, 2016.
|
| 507 |
+
Azizian, W., Mitliagkas, I., Lacoste-Julien, S., and Gidel, G. A tight and unified analysis of gradient-based methods for a whole spectrum of differentiable games. AISTATS, 2020.
|
| 508 |
+
Bui, M. N. and Combettes, P. L. A warped resolvent algorithm to construct nash equilibria. arXiv preprint arXiv:2101.00532, 2021.
|
| 509 |
+
Censor, Y., Gibali, A., and Reich, S. The subgradient extragradient method for solving variational inequalities in Hilbert space. Journal of Optimization Theory and Applications, 148(2):318-335, 2011.
|
| 510 |
+
Chambolle, A. and Pock, T. A first-order primal-dual algorithm for convex problems with applications to imaging. Journal of Mathematical Imaging and Vision, 40(1):120-145, 2011.
|
| 511 |
+
Chambolle, A. and Pock, T. On the ergodic convergence rates of a first-order primal-dual algorithm. Mathematical Programming, 159(1-2):253-287, 2016.
|
| 512 |
+
Chen, Y., Lan, G., and Ouyang, Y. Optimal primal-dual methods for a class of saddle point problems. SIAM Journal on Optimization, 24(4):1779-1814, 2014.
|
| 513 |
+
Chen, Y., Lan, G., and Ouyang, Y. Accelerated schemes for a class of variational inequalities. Mathematical Programming, 165(1):113-149, 2017.
|
| 514 |
+
Chen, Z., Zhou, Y., Xu, T., and Liang, Y. Proximal gradient descent-ascent: Variable convergence under KL geometry. *ICLR*, 2021.
|
| 515 |
+
Chiang, C.-K., Yang, T., Lee, C.-J., Mahdavi, M., Lu, C.-J., Jin, R., and Zhu, S. Online optimization with gradual variations. $COLT$ , 2012.
|
| 516 |
+
|
| 517 |
+
Condat, L. A primal-dual splitting method for convex optimization involving Lipschitzian, proximable and linear composite terms. Journal of Optimization Theory and Applications, 158(2):460-479, 2013.
|
| 518 |
+
Csetnek, E. R., Malitsky, Y., and Tam, M. K. Shadow Douglas-Rachford splitting for monotone inclusions. Applied Mathematics & Optimization, 80(3):665-678, 2019.
|
| 519 |
+
Daskalakis, C., Deckelbaum, A., and Kim, A. Near-optimal no-regret algorithms for zero-sum games. SODA, 2011.
|
| 520 |
+
Diakonikolas, J. Halpern iteration for near-optimal and parameter-free monotone inclusion and strong solutions to variational inequalities. $COLT$ , 2020.
|
| 521 |
+
Drori, Y. The exact information-based complexity of smooth convex minimization. Journal of Complexity, 39:1-16, 2017.
|
| 522 |
+
Drori, Y. and Teboulle, M. Performance of first-order methods for smooth convex minimization: A novel approach. Mathematical Programming, 145(1-2):451-482, 2014.
|
| 523 |
+
Ghadimi, S. and Lan, G. Optimal stochastic approximation algorithms for strongly convex stochastic composite optimization I: A generic algorithmic framework. SIAM Journal on Optimization, 22(4):1469-1492, 2012.
|
| 524 |
+
Ghadimi, S. and Lan, G. Optimal stochastic approximation algorithms for strongly convex stochastic composite optimization, II: Shrinking procedures and optimal algorithms. SIAM Journal on Optimization, 23(4):2061-2089, 2013.
|
| 525 |
+
Gidel, G., Berard, H., Vignoud, G., Vincent, P., and Lacoste-Julien, S. A variational inequality perspective on generative adversarial networks. *ICLR*, 2018.
|
| 526 |
+
Gidel, G., Hemmat, R. A., Pezeshki, M., Le Priol, R., Huang, G., Lacoste-Julien, S., and Mitliagkas, I. Negative momentum for improved game dynamics. AISTATS, 2019.
|
| 527 |
+
Golowich, N., Pattathil, S., Daskalakis, C., and Ozdaglar, A. Last iterate is slower than averaged iterate in smooth convex-concave saddle point problems. $COLT$ , 2020.
|
| 528 |
+
Goodfellow, I., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A., and Bengio, Y. Generative adversarial nets. NeurIPS, 2014.
|
| 529 |
+
Goodfellow, I. J., Shlens, J., and Szegedy, C. Explaining and harnessing adversarial examples. *ICLR*, 2015.
|
| 530 |
+
Halpern, B. Fixed points of nonexpanding maps. Bulletin of the American Mathematical Society, 73(6):957-961, 1967.
|
| 531 |
+
|
| 532 |
+
Hamedani, E. Y. and Aybat, N. S. A primal-dual algorithm for general convex-concave saddle point problems. arXiv preprint arXiv:1803.01401, 2018.
|
| 533 |
+
He, Y. and Monteiro, R. D. An accelerated HPE-type algorithm for a class of composite convex-concave saddlepoint problems. SIAM Journal on Optimization, 26(1): 29-56, 2016.
|
| 534 |
+
Hsieh, Y.-G., Iutzeler, F., Malick, J., and Mertikopoulos, P. On the convergence of single-call stochastic extra-gradient methods. NeurIPS, 2019.
|
| 535 |
+
Jin, C., Netrapalli, P., and Jordan, M. I. Minmax optimization: Stable limit points of gradient descent ascent are locally optimal. arXiv preprint arXiv:1902.00618, 2019.
|
| 536 |
+
Juditsky, A., Nemirovski, A., and Tauvel, C. Solving variational inequalities with stochastic mirror-prox algorithm. Stochastic Systems, 1(1):17-58, 2011.
|
| 537 |
+
Kolossoski, O. and Monteiro, R. D. An accelerated noneuclidean hybrid proximal extragradients-type algorithm for convex-concave saddle-point problems. Optimization Methods and Software, 32(6):1244-1272, 2017.
|
| 538 |
+
Korpelevich, G. Extragradient method for finding saddle points and other problems. Matekon, 13(4):35-49, 1977.
|
| 539 |
+
Lan, G. An optimal method for stochastic composite optimization. Mathematical Programming, 133(1-2):365-397, 2012.
|
| 540 |
+
Liang, T. and Stokes, J. Interaction matters: A note on non-asymptotic local convergence of generative adversarial networks. AISTATS, 2019.
|
| 541 |
+
Lieder, F. On the convergence rate of the Halpern-iteration. Optimization Letters, pp. 1-14, 2020.
|
| 542 |
+
Lin, T., Jin, C., and Jordan, M. On gradient descent ascent for nonconvex-concave minimax problems. ICML, 2020a.
|
| 543 |
+
Lin, T., Jin, C., Jordan, M., et al. Near-optimal algorithms for minimax optimization. $COLT$ , 2020b.
|
| 544 |
+
Lu, S., Tsaknakis, I., Hong, M., and Chen, Y. Hybrid block successive approximation for one-sided non-convex min-max problems: Algorithms and applications. IEEE Transactions on Signal Processing, 68:3676-3691, 2020.
|
| 545 |
+
Lyashko, S., Semenov, V., and Voitova, T. Low-cost modification of Korpelevich's methods for monotone equilibrium problems. Cybernetics and Systems Analysis, 47(4): 631, 2011.
|
| 546 |
+
Madry, A., Makelov, A., Schmidt, L., Tsipras, D., and Vladu, A. Towards deep learning models resistant to adversarial attacks. *ICLR*, 2018.
|
| 547 |
+
|
| 548 |
+
Malitsky, Y. Projected reflected gradient methods for monotone variational inequalities. SIAM Journal on Optimization, 25(1):502-520, 2015.
|
| 549 |
+
Malitsky, Y. Golden ratio algorithms for variational inequalities. Mathematical Programming, 184:383-410, 2020.
|
| 550 |
+
Malitsky, Y. and Tam, M. K. A forward-backward splitting method for monotone inclusions without cocoercivity. SIAM Journal on Optimization, 30(2):1451-1472, 2020.
|
| 551 |
+
Malitsky, Y. V. and Semenov, V. An extragradient algorithm for monotone variational inequalities. Cybernetics and Systems Analysis, 50(2):271-277, 2014.
|
| 552 |
+
Mason, J. C. and Handscomb, D. C. Chebyshev Polynomials. 2002.
|
| 553 |
+
Mertikopoulos, P., Zenati, H., Lecouat, B., Foo, C.-S., Chandrasekhar, V., and Piliouras, G. Optimistic mirror descent in saddle-point problems: Going the extra (gradient) mile. *ICLR*, 2019.
|
| 554 |
+
Mokhtari, A., Ozdaglar, A., and Pattathil, S. A unified analysis of extra-gradient and optimistic gradient methods for saddle point problems: Proximal point approach. AISTATS, 2020a.
|
| 555 |
+
Mokhtari, A., Ozdaglar, A. E., and Pattathil, S. Convergence rate of $\mathcal{O}(1 / k)$ for optimistic gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM Journal on Optimization, 30(4):3230-3251, 2020b.
|
| 556 |
+
Monteiro, R. D. and Svaiter, B. F. On the complexity of the hybrid proximal extragradients method for the iterates and the ergodic mean. SIAM Journal on Optimization, 20(6): 2755-2787, 2010.
|
| 557 |
+
Monteiro, R. D. and Svaiter, B. F. Complexity of variants of Tseng's modified FB splitting and Korpelevich's methods for hemivariational inequalities with applications to saddle-point and convex optimization problems. SIAM Journal on Optimization, 21(4):1688-1720, 2011.
|
| 558 |
+
Nedic, A. and Ozdaglar, A. Subgradient methods for saddlepoint problems. Journal of Optimization Theory and Applications, 142(1):205-228, 2009.
|
| 559 |
+
Nemirovski, A. Prox-method with rate of convergence $\mathcal{O}(1 / t)$ for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229-251, 2004.
|
| 560 |
+
Nemirovski, A., Juditsky, A., Lan, G., and Shapiro, A. Robust stochastic approximation approach to stochastic programming. SIAM Journal on Optimization, 19(4):1574-1609, 2009.
|
| 561 |
+
|
| 562 |
+
Nemirovsky, A. S. On optimality of Krylov's information when solving linear operator equations. Journal of Complexity, 7(2):121-130, 1991.
|
| 563 |
+
Nemirovsky, A. S. Information-based complexity of linear operator equations. Journal of Complexity, 8(2):153-175, 1992.
|
| 564 |
+
Nemirovsky, A. S. and Yudin, D. B. Problem Complexity and Method Efficiency in Optimization. 1983.
|
| 565 |
+
Nesterov, Y. Excessive gap technique in nonsmooth convex minimization. SIAM Journal on Optimization, 16(1): 235-249, 2005a.
|
| 566 |
+
Nesterov, Y. Smooth minimization of non-smooth functions. Mathematical Programming, 103(1):127-152, 2005b.
|
| 567 |
+
Nesterov, Y. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2-3):319-344, 2007.
|
| 568 |
+
Nesterov, Y. Primal-dual subgradient methods for convex problems. Mathematical Programming, 120(1):221-259, 2009.
|
| 569 |
+
Nesterov, Y. Introductory Lectures on Convex Optimization: A Basic Course. 2013.
|
| 570 |
+
Nesterov, Y. and Scrimali, L. Solving strongly monotone variational and quasi-variational inequalities. Discrete & Continuous Dynamical Systems - A, 31(4):1383-1396, 2011.
|
| 571 |
+
Noor, M. A. New extragradient-type methods for general variational inequalities. Journal of Mathematical Analysis and Applications, 277(2):379-394, 2003.
|
| 572 |
+
Nouiehed, M., Sanjabi, M., Huang, T., Lee, J. D., and Razaviyayn, M. Solving a class of non-convex min-max games using iterative first order methods. NeurIPS, 2019.
|
| 573 |
+
Ostrovskii, D. M., Lowy, A., and Razaviyayn, M. Efficient search of first-order Nash equilibria in nonconvex-concave smooth min-max problems. arXiv preprint arXiv:2002.07919, 2020.
|
| 574 |
+
Ouyang, Y. and Xu, Y. Lower complexity bounds of first-order methods for convex-concave bilinear saddle-point problems. Mathematical Programming, 185:1-35, 2021.
|
| 575 |
+
Peng, W., Dai, Y.-H., Zhang, H., and Cheng, L. Training GANs with centripetal acceleration. Optimization Methods and Software, 35(5):955-973, 2020.
|
| 576 |
+
Popov, L. D. A modification of the Arrow-Hurwicz method for search of saddle points. Mathematical Notes of the Academy of Sciences of the USSR, 28(5):845-848, 1980.
|
| 577 |
+
|
| 578 |
+
Rafique, H., Liu, M., Lin, Q., and Yang, T. Non-convex minmax optimization: Provable algorithms and applications in machine learning. arXiv preprint arXiv:1810.02060, 2018.
|
| 579 |
+
Rakhlin, A. and Sridharan, K. Online learning with predictable sequences. $COLT$ , 2013a.
|
| 580 |
+
Rakhlin, S. and Sridharan, K. Optimization, learning, and games with predictable sequences. NeurIPS, 2013b.
|
| 581 |
+
Rockafellar, R. T. Monotone operators associated with saddle-functions and minimax problems. Nonlinear Functional Analysis, 18(part 1):397-407, 1970.
|
| 582 |
+
Ryu, E. K. and Yin, W. Large-Scale Convex Optimization via Monotone Operators. Draft, 2021.
|
| 583 |
+
Ryu, E. K., Yuan, K., and Yin, W. ODE analysis of stochastic gradient methods with optimism and anchoring for minimax problems and GANs. arXiv preprint arXiv:1905.10899, 2019.
|
| 584 |
+
Solodov, M. V. and Svaiter, B. F. A hybrid approximate extragradient-proximal point algorithm using the enlargement of a maximal monotone operator. Set-Valued Analysis, 7(4):323-345, 1999.
|
| 585 |
+
Syrgkanis, V., Agarwal, A., Luo, H., and Schapire, R. E. Fast convergence of regularized learning in games. NeurIPS, 2015.
|
| 586 |
+
Taylor, A. and Bach, F. Stochastic first-order methods: Non-asymptotic and computer-aided analyses via potential functions. *COLT*, 2019.
|
| 587 |
+
Taylor, A. B., Hendrickx, J. M., and Glineur, F. Smooth strongly convex interpolation and exact worst-case performance of first-order methods. Mathematical Programming, 161(1-2):307-345, 2017.
|
| 588 |
+
Thekumparampil, K. K., Jain, P., Netrapalli, P., and Oh, S. Efficient algorithms for smooth minimax optimization. NeurIPS, 2019.
|
| 589 |
+
Tseng, P. On linear convergence of iterative methods for the variational inequality problem. Journal of Computational and Applied Mathematics, 60(1-2):237-252, 1995.
|
| 590 |
+
Tseng, P. A modified forward-backward splitting method for maximal monotone mappings. SIAM Journal on Control and Optimization, 38(2):431-446, 2000.
|
| 591 |
+
Vũ, B. C. A splitting algorithm for dual monotone inclusions involving cocoercive operators. Advances in Computational Mathematics, 38(3):667-681, 2013.
|
| 592 |
+
Wang, Y. and Li, J. Improved algorithms for convex-concave minimax optimization. NeurIPS, 2020.
|
| 593 |
+
|
| 594 |
+
Woodworth, B. and Srebro, N. Tight complexity bounds for optimizing composite objectives. NeurIPS, 2016.
|
| 595 |
+
Yadav, A., Shah, S., Xu, Z., Jacobs, D., and Goldstein, T. Stabilizing adversarial nets with prediction methods. *ICLR*, 2018.
|
| 596 |
+
Yan, M. A new primal-dual algorithm for minimizing the sum of three functions with a linear operator. Journal of Scientific Computing, 76(3):1698-1717, 2018.
|
| 597 |
+
Yang, J., Zhang, S., Kiyavash, N., and He, N. A catalyst framework for minimax optimization. NeurIPS, 2020.
|
| 598 |
+
Zhang, G., Bao, X., Lessard, L., and Grosse, R. A unified analysis of first-order methods for smooth games via integral quadratic constraints. arXiv preprint arXiv:2009.11359, 2020.
|
| 599 |
+
Zhang, J., Hong, M., and Zhang, S. On lower iteration complexity bounds for the saddle point problems. arXiv preprint arXiv:1912.07481, 2019.
|
| 600 |
+
Zhao, R. Optimal stochastic algorithms for convex-concave saddle-point problems. arXiv preprint arXiv:1903.01687, 2019.
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1e47f515bf084fd880ee7875867a49429b332058c590e9bf2ac7370cf6541817
|
| 3 |
+
size 505096
|
acceleratedalgorithmsforsmoothconvexconcaveminimaxproblemswitho1k2rateonsquaredgradientnorm/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1d12f70a68749553141f0d8c30f47f0df87a59cc369ae759c197058d4cb5f86a
|
| 3 |
+
size 783626
|
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e202ef23f638744df3fa0ef7c65fa6cbb63ec2d32f914060fa49d846f8619a32
|
| 3 |
+
size 76494
|
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:445267481f9681ac39aaca3696f5ed9ebe76c981c1d9259d35962a5d04570316
|
| 3 |
+
size 90961
|
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/81d1cb54-2fa4-4b89-9d22-ce405d95e3f8_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:14bbef5328cb08ffb019cee4273ced2440ba4c07ca74933eccd2041f95e2ef5e
|
| 3 |
+
size 2186297
|
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/full.md
ADDED
|
@@ -0,0 +1,316 @@
| 1 |
+
# Accelerating Feedforward Computation via Parallel Nonlinear Equation Solving
|
| 2 |
+
|
| 3 |
+
Yang Song<sup>1</sup> Chenlin Meng<sup>1</sup> Renjie Liao<sup>2,3</sup> Stefano Ermon<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Feedforward computation, such as evaluating a neural network or sampling from an autoregressive model, is ubiquitous in machine learning. The sequential nature of feedforward computation, however, requires a strict order of execution and cannot be easily accelerated with parallel computing. To enable parallelization, we frame the task of feedforward computation as solving a system of nonlinear equations. We then propose to find the solution using a Jacobi or Gauss-Seidel fixed-point iteration method, as well as hybrid methods of both. Crucially, Jacobi updates operate independently on each equation and can be executed in parallel. Our method is guaranteed to give exactly the same values as the original feedforward computation with a reduced (or equal) number of parallelizable iterations, and hence reduced time given sufficient parallel computing power. Experimentally, we demonstrate the effectiveness of our approach in accelerating (i) backpropagation of RNNs, (ii) evaluation of DenseNets, and (iii) autoregressive sampling of MADE and PixelCNN++, with speedup factors between 2.1 and 26 under various settings.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
With sufficient parallel computing resources, we can certainly accelerate any algorithm with a parallelizable component. However, many machine learning algorithms heavily rely on a seemingly non-parallelizable part—feedforward computation. To evaluate the output of a neural network, layers are computed one after the other in a feedforward fashion. To sample text from an autoregressive model, words
|
| 12 |
+
|
| 13 |
+
$^{1}$ Computer Science Department, Stanford University. $^{2}$ Department of Computer Science, University of Toronto. $^{3}$ Vector Institute. Correspondence to: Yang Song <yangsong@cs.stanford.edu>, Stefano Ermon <ermon@cs.stanford.edu>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $38^{th}$ International Conference on Machine Learning, PMLR 139, 2021. Copyright 2021 by the author(s).
|
| 16 |
+
|
| 17 |
+
are generated in sequence one by one. Because of the inherently sequential nature, it is difficult to directly perform feedforward computation in parallel—how can one output a label before any intermediate features are extracted, or generate the last word in a sentence before having seen the initial part?
|
| 18 |
+
|
| 19 |
+
At first sight, the idea of executing in parallel the various steps that comprise a feedforward computation procedure seems hopeless. Indeed, the task is clearly impossible in general. Machine learning workloads, however, have special properties that make the idea viable in some cases. First, computations are numerical in nature, and can tolerate small approximation errors. For example, it is acceptable if a faster method produces image samples at the cost of small imperceptible errors. Second, computations have been learned from data rather than designed by hand. As a result, they might involve unnecessary steps, and have dependencies between the various (sequential) stages that are weak enough to be ignored without significantly affecting the final results. Although we might not be able to explicitly characterize this structure of redundant dependencies, as long as it is present, we can design methods to take advantage of it.
|
| 20 |
+
|
| 21 |
+
Based on these insights, we propose an approach to accelerate feedforward computation with parallelism. While it is not beneficial for every type of feedforward computation, it works well in many cases of practical interest in machine learning. Our key idea is to interpret feedforward computation as solving a triangular system of nonlinear equations, and use efficient numerical solvers to find the solution. This is advantageous because (i) many numerical equation solvers can be easily parallelized; and (ii) iterative numerical equation solvers generate a sequence of intermediate solutions of increasing quality, so we can use early stopping to trade off approximation error with computation time. In particular, we propose to find the solution of the triangular system using nonlinear Jacobi and Gauss-Seidel (GS) methods (Ortega & Rheinboldt, 1970). Crucially, Jacobi iterations update each state independently and can be naturally executed in parallel. Moreover, we show that feedforward computation corresponds to GS iterations, and can be combined with Jacobi iterations to build hybrid methods that interpolate between them.
|
| 22 |
+
|
| 23 |
+
We empirically demonstrate the effectiveness and flexibility of our proposed numerical equation solvers by showing accelerations for three representative applications: (i) the backpropagation procedure for training RNNs; (ii) the inference of neural networks like DenseNets (Huang et al., 2017); and (iii) ancestral sampling from autoregressive models such as MADE (Germain et al., 2015) and PixelCNN++ (Salimans et al., 2017). In particular, for the RNN model considered in our experiments, our new method reduces the training time by more than a factor of two. For DenseNet, our Jacobi-type methods lead to an estimated speedup factor of 2.1. For ancestral sampling from autoregressive models, we achieve 26 and 25 times speed up for MADE sampling on MNIST (LeCun & Cortes, 2010) and CIFAR-10 (Krizhevsky et al., 2009) datasets; for PixelCNN++, we achieve 6.5 and 2.1 speedup factors respectively. Except for DenseNets where we simulate the performance due to computational constraints and implementation difficulties, all other results are measured with wall-clock time on a single GPU. This demonstrates that our methods not only perform well in the regime of massive parallel computing resources, but also have imminent practical values easily achievable with personal hardware.
|
| 24 |
+
|
| 25 |
+
# 2. Background
|
| 26 |
+
|
| 27 |
+
# 2.1. Feedforward Computation
|
| 28 |
+
|
| 29 |
+
Consider the problem of computing, given an input $\mathbf{u}$ , a sequence of states $\mathbf{s}_1,\mathbf{s}_2,\dots ,\mathbf{s}_T$ defined by the following recurrence relation:
|
| 30 |
+
|
| 31 |
+
$$
|
| 32 |
+
\mathbf {s} _ {t} = h _ {t} \left(\mathbf {u}, \mathbf {s} _ {1: t - 1}\right), \quad 1 \leq t \leq T, \tag {1}
|
| 33 |
+
$$
|
| 34 |
+
|
| 35 |
+
where $\{h_t\}_{t=1}^T$ are deterministic computable functions, and $\mathbf{s}_{1:t-1}$ is an abbreviation for $\mathbf{s}_1, \mathbf{s}_2, \dots, \mathbf{s}_{t-1}$ . From now on, we use $\mathbf{s}_{a:b}$ to denote $\mathbf{s}_a, \mathbf{s}_{a+1}, \dots, \mathbf{s}_b$ where $a \leq b$ and $a, b \in \mathbb{N}^+$ .
|
| 36 |
+
|
| 37 |
+
Given implementations of the functions $\{h_t\}_{t=1}^T$ , traditional feedforward computation solves this problem by sequentially evaluating and memorizing $\mathbf{s}_t$ , given $\mathbf{u}$ and the previously stored states $\mathbf{s}_{1:t-1}$ . Note that it cannot be naively parallelized across different time steps as each state $\mathbf{s}_t$ can only be obtained after we have already computed $\mathbf{s}_1, \dots, \mathbf{s}_{t-1}$ .
|
| 38 |
+
|
| 39 |
+
Feedforward computation is ubiquitous in machine learning. The following examples will appear in our experiments: (i) evaluating the output of a neural network layer by layer (neural network inference); (ii) back-propagating gradients from the loss function to weights (neural network training), and (iii) ancestral sampling from autoregressive models. For (i), $\mathbf{u}$ corresponds to the network input, and $\mathbf{s}_t$ denotes the activations of each layer; For (ii), $\mathbf{u}$ corresponds to the input and the activations stored during the forward pass, and $\mathbf{s}_t$ represents the gradient of the loss function w.r.t. each layer;
|
| 40 |
+
|
| 41 |
+
For (iii), $\mathbf{u}$ is the latent state of a pseudo-random number generator, and $\mathbf{s}_t$ is the $t$ -th dimension of the sample to be generated. See Appendix A for more detailed descriptions.
|
| 42 |
+
|
| 43 |
+
# 2.2. Solving Systems of Nonlinear Equations
|
| 44 |
+
|
| 45 |
+
A system of nonlinear equations has the following form
|
| 46 |
+
|
| 47 |
+
$$
|
| 48 |
+
f _ {i} \left(x _ {1}, x _ {2}, \dots , x _ {N}\right) = 0, \quad i = 1, 2 \dots , N, \tag {2}
|
| 49 |
+
$$
|
| 50 |
+
|
| 51 |
+
where $x_{1},x_{2},\dots ,x_{N}$ are unknown variables, and $f_{1},f_{2},\dots ,f_{N}$ are nonlinear functions. There are many effective numerical methods for solving systems of nonlinear equations. In this paper we mainly focus on nonlinear Jacobi and Gauss-Seidel methods, and refer to (Ortega & Rheinboldt, 1970) for an excellent introduction to the field.
|
| 52 |
+
|
| 53 |
+
# 2.2.1. NONLINEAR JACOBI ITERATION
|
| 54 |
+
|
| 55 |
+
To solve a system of equations like Eq. (2), iterative methods start from an initial guess $\mathbf{x}^0 \triangleq (x_1^0, x_2^0, \dots, x_N^0)$ of the solution, and gradually improve it through fixed-point iterations. We let $\mathbf{x}^k = (x_1^k, x_2^k, \dots, x_N^k)$ denote the solution obtained at the $k$ -th iteration. Given $\mathbf{x}^k$ , the nonlinear Jacobi iteration produces $\mathbf{x}^{k+1}$ by solving each univariate equation for $i = 1, 2, \dots, N$ :
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
f _ {i} \left(x _ {1} ^ {k}, \dots , x _ {i - 1} ^ {k}, x _ {i}, x _ {i + 1} ^ {k}, \dots , x _ {N} ^ {k}\right) = 0 \tag {3}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
for $x_{i}$ . We then set $x_{i}^{k + 1} = x_{i}$ for all $i$ . The process stops when it reaches a fixed point, or $\mathbf{x}^{k + 1}$ is sufficiently similar to $\mathbf{x}^k$ as measured by the forward difference $\left\| \mathbf{x}^{k + 1} - \mathbf{x}^k\right\| \leq \epsilon$ , where $\epsilon >0$ is a tolerance threshold. Crucially, all the $N$ univariate equations involved can be solved in parallel since there is no dependency among them.
|
| 62 |
+
|
| 63 |
+
# 2.2.2. NONLINEAR GAUSS-SEIDEL (GS) ITERATION
|
| 64 |
+
|
| 65 |
+
Nonlinear Gauss-Seidel (GS) iteration is another iterative solver for systems of nonlinear equations. Similar to Eq. (3), the $k$ -th step of nonlinear GS is to solve
|
| 66 |
+
|
| 67 |
+
$$
|
| 68 |
+
f_{i}\left(x_{1}^{k+1}, \dots, x_{i-1}^{k+1}, x_{i}, x_{i+1}^{k}, \dots, x_{N}^{k}\right) = 0 \tag{4}
|
| 69 |
+
$$
|
| 70 |
+
|
| 71 |
+
for $x_{i}$ and to set $x_{i}^{k + 1} = x_{i}$ for $i = 1,2,\dots ,N$ . The process stops when it reaches a fixed point, or $\left\| \mathbf{x}^{k + 1} - \mathbf{x}^k\right\| \leq \epsilon$ . Different from Eq. (3), GS updates leverage the new solutions as soon as they are available. This creates data dependency among adjacent univariate equations and therefore requires $N$ sequential computations to get $\mathbf{x}^{k + 1}$ from $\mathbf{x}^k$ . Assuming that each univariate equation of Eq. (3) and Eq. (4) takes the same time to solve, one GS iteration costs as much time as $N$ parallel Jacobi iterations.
|
| 72 |
+
|
| 73 |
+
Although one GS iteration involves sweeping over all variables and costs more compute than one Jacobi iteration, it can converge faster in certain cases, e.g., solving tridiagonal linear systems (Young, 2014).
|
| 74 |
+
|
| 75 |
+
# 3. Feedforward Computation as Equation Solving
|
| 76 |
+
|
| 77 |
+
Our main insight is to frame a feedforward computation problem as solving a system of equations. This novel perspective enables us to use iterative solvers, such as nonlinear Jacobi and Gauss-Seidel methods, to parallelize and potentially accelerate traditional feedforward computation.
|
| 78 |
+
|
| 79 |
+
# 3.1. Feedforward Computation Solves a Triangular System of Equations
|
| 80 |
+
|
| 81 |
+
Given input $\mathbf{u}$ , the recurrence relation among states $\mathbf{s}_1, \mathbf{s}_2, \dots, \mathbf{s}_T$ in Eq. (1) can be explicitly expressed as the following system of nonlinear equations
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
h _ {t} \left(\mathbf {u}, \mathbf {s} _ {1: t - 1}\right) - \mathbf {s} _ {t} = 0, \quad t = 1, 2, \dots , T \tag {5}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
We can re-write Eq. (5) as a system of equations in the form of Eq. (2) if we let $N = T$ , $x_{i} \triangleq \mathbf{s}_{i}$ , and $f_{i}(x_{1}, x_{2}, \dots, x_{T}) \triangleq h_{i}(\mathbf{u}, \mathbf{s}_{1:i-1}) - \mathbf{s}_{i}$ , for $i = 1, \dots, N$ . One unique property of these functions is that $f_{i}(\cdot)$ does not depend on $x_{i+1}, \dots, x_{N}$ , and therefore a recurrence relation corresponds to a triangular system of nonlinear equations. Standard feedforward computation, as defined in Section 2.1, can be viewed as an iterative approach to solving the above triangular system of nonlinear equations.
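For instance, with $T = 3$ the system (5) is the lower-triangular set of equations

$$
\begin{array}{l} h_{1}(\mathbf{u}) - \mathbf{s}_{1} = 0, \\ h_{2}(\mathbf{u}, \mathbf{s}_{1}) - \mathbf{s}_{2} = 0, \\ h_{3}(\mathbf{u}, \mathbf{s}_{1}, \mathbf{s}_{2}) - \mathbf{s}_{3} = 0, \end{array}
$$

so the $t$-th equation involves only $\mathbf{s}_{1}, \dots, \mathbf{s}_{t}$.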
|
| 88 |
+
|
| 89 |
+
# 3.2. Jacobi Iteration for Recurrence Relations
|
| 90 |
+
|
| 91 |
+
Any numerical equation solver can be employed to solve the system of nonlinear equations in Eq. (5) and if converges, should return the same values as obtained by standard feedforward computation. As an example, we can use nonlinear Jacobi iterations to solve Eq. (5), as given in Algorithm 1. Here we use $\mathbf{s}_{1:T}^k$ to denote the collection of all states at the $k$ -th iteration, and choose $\epsilon > 0$ as a threshold for early stopping when $\left\| \mathbf{s}_{1:T}^k - \mathbf{s}_{1:T}^{k-1} \right\| \leq \epsilon$ , i.e., the forward difference of states is small.
|
| 92 |
+
|
| 93 |
+
Although the nonlinear Jacobi iteration method is not guaranteed to converge to the correct solutions for general systems of equations (Saad, 2003), it does converge for solving triangular systems. In particular, it is easy to conclude:
|
| 94 |
+
|
| 95 |
+
Proposition 1. Algorithm 1 converges and yields the same result as standard feedforward computation in at most $T$ parallel iterations for any initialization of $\mathbf{s}_{1:T}^{0}$ if $\epsilon = 0$ .
|
| 96 |
+
|
| 97 |
+
In the same vein, we can also apply nonlinear GS iterations to Eq. (5). Interestingly, running one iteration of GS is the same as performing standard feedforward computation and hence GS for triangular systems always converges in a single step, even though there is typically no convergence guarantee for more general systems of equations.
|
| 98 |
+
|
| 99 |
+
As already discussed in Section 2, Jacobi iterations can exploit parallelism better than GS. Specifically, nonlinear
|
| 100 |
+
|
| 101 |
+
Algorithm 1 Nonlinear Jacobi Iteration
|
| 102 |
+
Input: u; $\epsilon$ ; $T$
|
| 103 |
+
Initialize $\mathbf{s}_1^0, \mathbf{s}_2^0, \dots, \mathbf{s}_T^0$ and set $k \gets 0$
|
| 104 |
+
repeat
|
| 105 |
+
$k \gets k + 1$
|
| 106 |
+
for $t = 1$ to $T$ do in parallel
|
| 107 |
+
$\mathbf{s}_t^k \gets h_t(\mathbf{u}, \mathbf{s}_{1:t-1}^{k-1})$
|
| 108 |
+
end for
|
| 109 |
+
until $k = T$ or $\| \mathbf{s}_{1:T}^k - \mathbf{s}_{1:T}^{k-1} \| \leq \epsilon$
|
| 110 |
+
return $\mathbf{s}_1^k, \mathbf{s}_2^k, \dots, \mathbf{s}_T^k$
|
| 111 |
+
|
| 112 |
+
Jacobi can complete $T$ iterations in parallel during which GS is only able to finish one iteration, if we assume that (i) the recurrence relation Eq. (1) can be evaluated using the same amount of time for all $t = 1,\dots ,T$ , and (ii) $T$ Jacobi updates can be done in parallel. Thus, under these assumptions, Algorithm 1 can be much faster than the standard feedforward computation if the convergence of Jacobi iterations is fast. Even in the worst case, Algorithm 1 requires only $T$ iterations executed in parallel, which takes the same wall-clock time as one GS iteration (i.e., standard feedforward computation).
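For concreteness, here is a minimal Python sketch of Algorithm 1 (ours; the per-sweep parallelism is only simulated by a list comprehension, and in practice the $T$ updates would be dispatched to parallel processors or batched on a GPU).

```python
import numpy as np

def jacobi_feedforward(h, u, s_init, eps=0.0, max_iters=None):
    """Algorithm 1 sketch. h is a list of length T; h[t] computes the (t+1)-th
    state from u and the list of the t preceding states (previous iterate)."""
    T = len(h)
    max_iters = T if max_iters is None else max_iters
    s = [np.asarray(x, dtype=float) for x in s_init]
    for k in range(1, max_iters + 1):
        # All T updates read only the previous iterate, so they can run in parallel.
        s_new = [np.asarray(h[t](u, s[:t])) for t in range(T)]
        diff = np.sqrt(sum(np.sum((a - b) ** 2) for a, b in zip(s_new, s)))
        s = s_new
        if k == T or diff <= eps:
            break
    return s, k
```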
|
| 113 |
+
|
| 114 |
+
# 3.3. Hybrid Iterative Solvers
|
| 115 |
+
|
| 116 |
+
We can combine Jacobi and GS iterations to leverage advantages from both methods. The basic idea is to group states into blocks and view Eq. (5) as a system of equations over these blocks. We can blend Jacobi and GS by first applying one of them to solve for the blocks, and then use the other to solve for individual states inside each block. Depending on which method is used first, we can define two different combinations dubbed Jacobi-GS and GS-Jacobi iterations respectively.
|
| 117 |
+
|
| 118 |
+
Algorithm 2 Nonlinear Jacobi-GS Iteration
|
| 119 |
+
Input: u; $\mathcal{B}_1,\mathcal{B}_2,\dots ,\mathcal{B}_M$ ; $\epsilon$ ; $T$
Initialize $\mathbf{s}_1^0,\mathbf{s}_2^0,\dots ,\mathbf{s}_T^0$ and set $k\gets 0$
repeat
$k\gets k + 1$
for $i = 1$ to $M$ do in parallel
$[a,b]\gets \mathcal{B}_i$
for $j\in \mathcal{B}_i$ do
$\mathbf{s}_j^k\gets h_j(\mathbf{u},\mathbf{s}_{1:a - 1}^{k - 1},\mathbf{s}_{a:j - 1}^k)$
end for
end for
until $k = M$ or $\| \mathbf{s}_{1:T}^k -\mathbf{s}_{1:T}^{k - 1}\| \leq \epsilon$
return $\mathbf{s}_1^k,\mathbf{s}_2^k,\dots ,\mathbf{s}_T^k$
|
| 120 |
+
|
| 121 |
+
Suppose we use an integer interval $\mathcal{B} = [[a,b]]$ to represent a block of variables $\{\mathbf{s}_a,\mathbf{s}_{a + 1},\dots ,\mathbf{s}_b\}$ , and let $\{\mathcal{B}_1,\mathcal{B}_2,\dots ,\mathcal{B}_M\}$ be a set of integer intervals that partitions $[[1,T]]$ . We formally define Jacobi-GS in Algorithm 2,
|
| 122 |
+
|
| 123 |
+
Algorithm 3 Nonlinear GS-Jacobi Iteration
|
| 124 |
+
Input: u; $\mathcal{B}_1,\mathcal{B}_2,\dots ,\mathcal{B}_M;\epsilon ;T$
|
| 125 |
+
Initialize $\mathbf{s}_1,\mathbf{s}_2,\dots ,\mathbf{s}_T$
|
| 126 |
+
for $i = 1$ to $M$ do
Initialize $\mathbf{s}_j^0$ for all $j\in \mathcal{B}_i$ and set $k\gets 0$
$[a,b]\gets \mathcal{B}_i$
repeat
$k\gets k + 1$
for $j\in \mathcal{B}_i$ do in parallel
$\mathbf{s}_j^k\gets h_j(\mathbf{u},\mathbf{s}_{1:a - 1},\mathbf{s}_{a:j - 1}^{k - 1})$
end for
until $k = |\mathcal{B}_i|$ or $\| \mathbf{s}_{\mathcal{B}_i}^k -\mathbf{s}_{\mathcal{B}_i}^{k - 1}\| \leq \epsilon$
$\mathbf{s}_{\mathcal{B}_i}\gets \mathbf{s}_{\mathcal{B}_i}^k$
|
| 127 |
+
end for
|
| 128 |
+
return $\mathbf{s}_1,\mathbf{s}_2,\dots ,\mathbf{s}_T$
|
| 129 |
+
|
| 130 |
+
where $\mathbf{s}_{\mathcal{B}}$ is a shorthand for $\{\mathbf{s}_i \mid i \in \mathcal{B}\}$ . GS-Jacobi can be similarly defined and we provide its pseudo-code in Algorithm 3. Particularly, in Jacobi-GS (Algorithm 2), all $M$ blocks are updated in parallel and states within each block $\mathcal{B}_i$ are updated sequentially based on the latest solutions. In GS-Jacobi (Algorithm 3), we sequentially update the $M$ blocks based on the latest solutions of previous blocks and the states within each block $\mathcal{B}_i$ are updated in parallel.
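A minimal sketch of GS-Jacobi (Algorithm 3) in the same style follows; here `blocks` is assumed to be a list of `range` objects partitioning $0,\dots,T-1$, and the inner list comprehension is the block-wise Jacobi sweep that would run in parallel.

```python
import numpy as np

def gs_jacobi_feedforward(h, u, blocks, s_init, eps=0.0):
    """Algorithm 3 sketch: outer loop over blocks is sequential (GS);
    within a block, states are updated Jacobi-style from the previous sweep."""
    s = [np.asarray(x, dtype=float) for x in s_init]
    for block in blocks:
        for k in range(1, len(block) + 1):
            # States before the block are final; within the block we read the previous sweep.
            s_new = [np.asarray(h[j](u, s[:j])) for j in block]
            diff = np.sqrt(sum(np.sum((x - s[j]) ** 2) for x, j in zip(s_new, block)))
            for x, j in zip(s_new, block):
                s[j] = x
            if diff <= eps:
                break
    return s
```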
|
| 131 |
+
|
| 132 |
+
Since Eq. (5) is a triangular system of nonlinear equations, we have the following observation:
|
| 133 |
+
|
| 134 |
+
Proposition 2. For any initialization, Jacobi-GS (Algorithm 2) and GS-Jacobi (Algorithm 3) converge in at most $M$ block-wise iterations and yield the same results as obtained by standard feedforward computation if $\epsilon = 0$ .
|
| 135 |
+
|
| 136 |
+
In summary, all the numerical equation solvers discussed above have guaranteed convergence in finite steps when solving our triangular systems of nonlinear equations in Eq. (5), and can thus act as valid alternatives to standard feedforward computation. Traditional asymptotic analysis of convergence rates is not applicable here, since the quotient convergence factor is undefined, and the root convergence factor is zero (per the definitions in Ortega & Rheinboldt (1970)) when methods converge in finite steps.
|
| 137 |
+
|
| 138 |
+
# 4. Accelerating Feedforward Computation
|
| 139 |
+
|
| 140 |
+
Below we discuss when Jacobi or hybrid methods can accelerate feedforward computation. We start with a computation model that is idealized but captures important practical aspects of Jacobi methods. The computation model assumes (i) for all $t = 1,2,\dots ,T$ , the recurrence relation Eq. (1) takes the same amount of time to compute for all values that $\mathbf{s}_{1:t - 1}$ and $\mathbf{u}$ may take, and (ii) we have access to at least $T$ processors with the same computational power. For simplicity, we only count the computational cost of evaluating
|
| 141 |
+
|
| 142 |
+
the recurrence relation given in Eq. (1) and ignore other potential costs that depend more on specific hardware implementation, such as data movements and synchronization.
|
| 143 |
+
|
| 144 |
+
We now analyze the advantages of various methods when the recurrence relations have different structures under the above computation model, and when the computation model is relaxed.
|
| 145 |
+
|
| 146 |
+
# 4.1. When to Use the Jacobi Solver
|
| 147 |
+
|
| 148 |
+
The above computation model has already been used several times to argue that $T$ parallel iterations of the Jacobi method cost the same wall-clock time as one sequential iteration of the GS method (i.e., the standard feedforward computation). According to Proposition 1, the Jacobi algorithm converges within $T$ parallel iterations. This implies that running Algorithm 1 is always at least as fast as standard feedforward computation (or GS).
|
| 149 |
+
|
| 150 |
+
Since Jacobi iterations use more processors for parallel execution, it is necessary to understand when the speedup of Jacobi methods is worthwhile. To get some intuition, we first consider some typical examples where Jacobi iterations may or may not lead to compelling speedups with respect to Gauss-Seidel.
|
| 151 |
+
|
| 152 |
+
Example 1: fully independent chains. The best case for Jacobi iteration is when, for each $t = 1, \dots, T$ , $\mathbf{s}_t = h_t(\mathbf{u})$ . For recurrence relations where different states are fully independent of each other, one parallel iteration of Jacobi suffices to yield the correct values for all states, whereas standard feedforward computation needs to compute each state sequentially. Parallelism in this case results in the maximum possible speedup factor of $T$ .
|
| 153 |
+
|
| 154 |
+
Example 2: chains with long skip connections. Here is a slightly worse, but still advantageous case for Jacobi iterations: each state only depends on far earlier states in the sequence via long skip connections. One simple instance is when $\mathbf{s}_1 = h_1(\mathbf{u})$ and $\mathbf{s}_t = h_t(\mathbf{u},\mathbf{s}_1)$ for $t > 1$ . The Jacobi method needs only 2 parallel iterations to obtain the correct values of all intermediate states, which leads to a speedup factor of $T / 2$ . We note that skip connections are commonly used in machine learning models, for example in ResNets (He et al., 2016), DenseNets (Huang et al., 2017), and the computational graph of RNN backpropagation due to shared weights across time steps.
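Using the `jacobi_feedforward` sketch from Section 3.2 above (our illustrative helper), this skip-connection example indeed stabilizes after two parallel sweeps; the forward-difference stopping test then fires on the third sweep.

```python
T = 8
h = [lambda u, s_prev: u + 1.0] + \
    [lambda u, s_prev: 2.0 * s_prev[0] for _ in range(T - 1)]   # s_t = h_t(u, s_1) for t > 1
s, iters = jacobi_feedforward(h, u=1.0, s_init=[0.0] * T)
print(iters, s)  # stops after 3 sweeps; the values are already exact after sweep 2
```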
|
| 155 |
+
|
| 156 |
+
Example 3: Markov chains. The worst case for Jacobi iterations happens when the recurrence relation is strictly Markov, i.e., $\mathbf{s}_1 = h_1(\mathbf{u})$ and $\mathbf{s}_t = h_t(\mathbf{s}_{t-1})$ for $t > 1$ . The Markov property ensures that when $t > 1$ , the only way for $\mathbf{s}_t$ to be influenced by the input $\mathbf{u}$ is through computing $\mathbf{s}_{t-1}$ . Therefore, as long as $\mathbf{s}_T$ depends on $\mathbf{u}$ in a non-trivial way, it will take at least $T$ parallel iterations for the Jacobi method to propagate information from $\mathbf{u}$ all the way to $\mathbf{s}_T$ .
|
| 157 |
+
|
| 158 |
+
In this case the running time of Jacobi matches that of GS or feedforward computation under our computation model.
|
| 159 |
+
|
| 160 |
+
In general, a recurrence relation can be represented as a directed acyclic graph (DAG) with $T + 1$ nodes $\{\mathbf{u},\mathbf{s}_1,\mathbf{s}_2,\dots ,\mathbf{s}_T\}$ to indicate computational dependency between states. The number of parallel iterations needed for the Jacobi method to converge is upper bounded by the critical path length (Kelley Jr & Walker, 1959) (i.e., the length of the longest path between all pairs of nodes), whereas the number of iterations required for standard feedforward computation is always $T$ . Therefore, Jacobi methods are better when the DAG has a smaller critical path length.
Strictly speaking, the DAGs of many feedforward processes in machine learning may not have a small critical path length. For example, DenseNets have a critical path length of $T$ since adjacent layers are connected, yet empirically they enjoy substantial acceleration from Jacobi methods. This is because the influence of many connections is negligible (e.g., their weights are small), and the DAG without these weak connections can have a much smaller effective critical path length. This frequently happens because models are learned rather than manually specified, and small numerical errors do not affect the results.
We stress that all examples considered above are overly simplified for illustrative purposes. Our experiments in Section 5 are on much more complicated tasks—neither RNN backpropagation, DenseNet evaluation, nor autoregressive sampling has a computational graph as simple as those examples. Empirically, we observe that Jacobi iterations have larger advantages when the computational graph of a machine learning task contains many long skip connections (e.g., DenseNets), but fall short when the computational graph is closer to a Markov chain (e.g., ResNets). Both are in agreement with the intuition given by our examples.
# 4.2. When to Use Hybrid Solvers
Our idealized computation model introduced at the beginning of this section assumes that we have $T$ parallel processors, and updates in the recurrence relation at $t = 1,\dots ,T$ all have the same running time. When these assumptions do not hold, Jacobi-GS and GS-Jacobi are often more desirable than naive Jacobi iterations.
First, when fewer than $T$ parallel processors are available, we cannot directly apply the Jacobi method. In contrast, Jacobi-GS and GS-Jacobi require fewer parallel processors (equal to the number of blocks and to the block size, respectively), which can thus be tuned at will.
Second, when the computation time is non-uniform across different $t$, each parallel iteration of the Jacobi method will be bottlenecked by the slowest update across all time steps. One can use Jacobi-GS and GS-Jacobi to reduce this bottleneck: the former can group different time steps so that each block takes roughly the same time to update, balancing the workload across parallel processors, while the latter can reduce the number of steps computed in parallel, leading to a smaller bottleneck during each GS update.
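A rough way to form such balanced blocks for Jacobi-GS is a single greedy pass over the measured per-step costs; the function and numbers below are an illustrative sketch, not part of the paper's implementation.

```python
def balanced_blocks(costs, num_blocks):
    # Group consecutive time steps so each block has roughly the same
    # total update cost, i.e., a balanced workload per parallel worker.
    target = sum(costs) / num_blocks
    blocks, current, acc = [], [], 0.0
    for t, c in enumerate(costs):
        current.append(t)
        acc += c
        if acc >= target and len(blocks) < num_blocks - 1:
            blocks, current, acc = blocks + [current], [], 0.0
    blocks.append(current)
    return blocks

print(balanced_blocks([4, 1, 1, 1, 1, 4], num_blocks=3))
# -> [[0], [1, 2, 3, 4], [5]]
```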
Third, when serial computation has unique advantages, the Jacobi method may have degraded performance since it is purely parallel. In certain cases, the computation for $h_t(\mathbf{u}, \mathbf{s}_{1:t-1})$ can be cached to save time when computing $h_{t+1}(\mathbf{u}, \mathbf{s}_{1:t})$ (cf. Ramachandran et al. (2017) for autoregressive models). This makes sequential computation faster than independent executions in parallel, and therefore reduces the cost-effectiveness of Jacobi methods compared to feedforward computation. In contrast, both Jacobi-GS and GS-Jacobi are more advantageous because the sequential GS iterations within and between blocks can also benefit from the faster serial computation enabled by caching.
Finally, Jacobi-GS often converges faster than Jacobi even without the above considerations. For example, the "block" Jacobi method in the context of solving linear triangular systems is equivalent to our Jacobi-GS when applied to linear recurrence relations, and is shown to enjoy faster convergence than naive Jacobi iterations (Chow et al., 2018).
# 4.3. Practical Recommendations
Block size in hybrid solvers. When using hybrid methods, we should ensure that each block requires a comparable amount of computation. For Jacobi-GS, a larger block size requires fewer parallel computing units at the cost of slower running speed, while it is the opposite for GS-Jacobi. Users should balance this trade-off based on their goals and availability of computing units.
Number of iterations. Determining the total number of iterations to run in advance is difficult. Instead, we recommend an adaptive approach, where users stop the iteration once the forward difference (defined in Section 2) falls below a chosen tolerance value $\epsilon$ (see Algorithms 1, 2 and 3).
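A sketch of this adaptive stopping rule, assuming a `step` callable that applies one parallel iteration to the full list of (scalar) states:

```python
def iterate_until_converged(step, s0, eps=1e-2, max_iters=10_000):
    # Stop once the forward difference (the largest change between two
    # consecutive iterates) drops below the tolerance eps.
    s = s0
    for k in range(max_iters):
        s_new = step(s)
        if max(abs(a - b) for a, b in zip(s_new, s)) < eps:
            return s_new, k + 1
        s = s_new
    return s, max_iters
```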
# 5. Experiments
Here we empirically verify the effectiveness of our proposed algorithms on (i) the backpropagation of RNNs, (ii) the evaluation of neural networks, and (iii) the ancestral sampling of deep autoregressive models. We report the speedups of our algorithms measured with wall-clock time on real hardware, except for the DenseNet experiment where we simulate the performance due to the difficulty of implementing our methods in current deep learning frameworks like PyTorch (Paszke et al., 2019) and TensorFlow (Abadi et al., 2015). We provide the main experimental results with key details in this section, and relegate other details/results to Appendix C/D.
*(Figure 1 panels: (a) RNN training; (b) MADE sampling on MNIST; (c) PixelCNN++ sampling on MNIST; (d) DenseNet evaluation; (e) MADE sampling on CIFAR-10; (f) PixelCNN++ sampling on CIFAR-10.)*
Figure 1. (a) The performance of Jacobi iterations on accelerating RNN training. Here we use "Jacobi_n" to denote the Jacobi method truncated at the $n$ -th iteration, and "feedforward" for standard backpropagation. All values are averaged over 10 runs and shaded areas denote $1/10$ standard deviations. (d) The performance of Jacobi-GS on evaluating DenseNets. The y-axis represents the number of incorrect labels in top-5 predictions. The shaded areas represent standard deviations across 100 random input images. (b)(e) The performance of feedforward sampling vs. Jacobi iterations for MADE. The shaded areas represent standard deviations computed over 100 runs. (c)(f) Comparing different sampling algorithms for PixelCNN++. Results are averaged over 10 runs and shaded areas show standard deviations.
# 5.1. Backpropagation of RNNs
We consider accelerating the training procedure of a recurrent neural network (RNN) with Jacobi iterations. The backward pass of RNNs can benefit from Jacobi-type approaches, because the loss function is connected to all time steps in the computation graph, and therefore gradient information can quickly flow from the final loss value to all hidden states with one Jacobi update.
To demonstrate this, we train a simple RNN with one hidden layer to fit sequences. The dataset is synthesized by flattening resized MNIST digits (resolution $10 \times 10$). We report how the training loss decreases with respect to wall-clock time in Fig. 1(a). Since the length of input sequences is fixed to 100, there are a total of 100 steps in the backward pass. We use "Jacobi_n" to denote the Jacobi approach truncated at the $n$-th iteration ($n \leq 100$), and "feedforward" corresponds to the standard backpropagation algorithm. In Fig. 3 (see Appendix D), we show how Jacobi_n converges to the true gradients with respect to $n$. We can trade off between the accuracy and speed of gradient computation by tuning $n$. As demonstrated in Fig. 1(a), Jacobi methods can reduce the training time by around a factor of two with a proper $n$.
# 5.2. Evaluating DenseNets

DenseNets (Huang et al., 2017) are convolutional neural networks with a basic building block called the dense layer. Each dense layer contains two convolutions, and is connected to every other dense layer in a feedforward fashion. DenseNets are particularly suitable for Jacobi-type iterative approaches because information can quickly flow from input to output in one update via skip connections.
Setup. We use a DenseNet-201 model pre-trained on ImageNet (Russakovsky et al., 2015). We define a state in the corresponding recurrence relation to be the feature maps of a convolutional layer. We apply the Jacobi-GS method (Algorithm 2) to compute all states, where each dense layer (consisting of two states) is grouped as one block. We empirically verify that evaluating each dense layer separately takes comparable running time on GPUs. Therefore, by arranging these dense layers as blocks, Jacobi-GS can have roughly balanced workload for parallel execution.
Performance Metrics. For this task, a full implementation of our algorithms would involve heterogeneous parallel execution of convolutional layers, which is not well supported by existing deep learning frameworks such as JAX (Bradbury et al., 2018), PyTorch (Paszke et al., 2019) or TensorFlow (Abadi et al., 2015). Therefore, we estimate the speedup for a real parallel implementation by simulating the performance of Jacobi-GS with a purely sequential implementation, assuming no overheads due to parallelism. Specifically, we run each dense layer 10 times on the GPU and take the average to measure its wall-clock time, which we denote as $t_1, t_2, \dots, t_{98}$, since there are 98 blocks in total. We then estimate one parallel iteration of Jacobi-GS with $\max_{1 \leq i \leq 98} t_i$, and the time for full feedforward computation with $\sum_{i=1}^{98} t_i$.
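The simulation itself reduces to a pair of reductions over the measured times; the timing values and iteration count below are placeholders, not our measurements.

```python
import numpy as np

t = np.random.uniform(2e-4, 4e-4, size=98)   # placeholder per-block times t_1..t_98

time_feedforward = t.sum()        # sequential: blocks evaluated one after another
time_per_parallel_iter = t.max()  # Jacobi-GS: bottlenecked by the slowest block
num_iters = 40                    # placeholder iteration count until convergence

print(f"estimated speedup: {time_feedforward / (num_iters * time_per_parallel_iter):.2f}x")
```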
Results. We summarize the performance of Jacobi-GS in Fig. 1(d). We plot the curves of both error and forward difference (defined in Section 2.2.1), measured using the number of different labels in top-5 predictions. The results indicate that forward differences closely trace the ground-truth errors and therefore can be reliably used as a stopping criterion. As shown in Fig. 1(d), the estimated time for Jacobi-GS to converge is around 0.0131s, which is 2.1 times faster than 0.0279s, the estimated time needed for feedforward computation. Note that this is a theoretical speedup. The actual speedup might be smaller due to overheads of parallel execution.
# 5.3. Autoregressive Sampling
We consider two popular autoregressive models for image generation: MADE (Germain et al., 2015) and PixelCNN++ (Salimans et al., 2017). Both generate images pixel-by-pixel in raster scan order, and thus every pixel forms a state in the corresponding recurrence relation of feedforward computation.
# 5.3.1. MADE
For autoregressive sampling from MADE, each iteration of feedforward computation requires a forward propagation of the whole network, which equals the cost of one parallel Jacobi iteration. This means that sampling from MADE is a perfect use case for Jacobi iterations, where no extra parallelism is needed compared to naive feedforward computation.
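The following self-contained toy model illustrates why: a strictly triangular map with weak weights stands in for a MADE forward pass, and iterating the full-vector update converges to the same sample as sequential generation in far fewer passes. Everything here (the map, sizes, tolerance) is illustrative, not the actual model.

```python
import numpy as np

rng = np.random.default_rng(0)
d = 64
# Strictly lower-triangular weights: pixel t depends only on pixels < t,
# and each individual dependency is weak, as is typical of learned models.
W = np.tril(rng.normal(size=(d, d)), k=-1) * 0.3 / np.sqrt(d)
u = rng.normal(size=d)      # fixed noise, playing the role of fixed quantiles

def forward(x):             # stand-in for one full network forward pass
    return np.tanh(W @ x) + u

x_seq = np.zeros(d)         # feedforward sampling: d sequential passes,
for _ in range(d):          # committing one new pixel per pass
    x_seq = forward(x_seq)

x_jac, iters, diff = np.zeros(d), 0, np.inf
while diff > 1e-6:          # Jacobi sampling: full-vector fixed-point passes
    x_new = forward(x_jac)
    diff, x_jac, iters = np.max(np.abs(x_new - x_jac)), x_new, iters + 1

print(iters, np.max(np.abs(x_seq - x_jac)))  # converges in far fewer than d passes
```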
Setup. We compared Jacobi iteration against feedforward sampling for models trained on MNIST (LeCun & Cortes, 2010) and CIFAR-10 (Krizhevsky et al., 2009) respectively. The experiments were repeated 100 times and we report the means and standard deviations measured in actual wall-clock time on a single NVIDIA Titan Xp GPU, accounting for all the overheads.
Results. For Jacobi iterations, the forward difference accurately traces the error between the current and final samples, and is thus a good metric for convergence and early stopping. In contrast, forward differences for the standard feedforward computation are not indicative of convergence. In terms of wall-clock time, the Jacobi method requires only 0.013s to converge on MNIST, while feedforward computation needs 0.343s. This amounts to a speedup factor of around 26. For CIFAR-10, the time difference is 0.119s vs. 3.026s, which implies a speedup factor of around 25. The significant speedup achieved by Jacobi methods for MADE is highly practical. It not only accelerates image generation, but can also directly improve the speed of other models where MADE sampling is a sub-process, such as computing the likelihood of Inverse Autoregressive Flows (Kingma et al., 2016) and sampling from Masked Autoregressive Flows (Papamakarios et al., 2017).
# 5.3.2. PIXELCNN++
PixelCNN++ is a more advanced autoregressive model that typically achieves higher likelihood on image modeling tasks compared to MADE. In addition to the vanilla Jacobi method, we test the proposed hybrid methods, Jacobi-GS and GS-Jacobi. Feedforward sampling from PixelCNN++ can be accelerated by caching (Ramachandran et al., 2017), where the computation performed for one state is memorized to accelerate the computation of later states. As discussed in Section 4, parallel Jacobi updates cannot leverage these caches for faster sampling, and therefore one parallel update can be slower than one sequential update of feedforward sampling. Jacobi-GS and GS-Jacobi, in contrast, can take advantage of the caching mechanism since they incorporate sequential updates.
Setup. We use PixelCNN++ models trained on MNIST and CIFAR-10 datasets. Each experiment is performed 10 times and we show both mean and standard deviation in Fig. 1(c) and 1(f). We consider feedforward sampling with and without caches. We implement Jacobi iterations in the same way as MADE, where no cache is used. We modify the caching mechanisms from (Ramachandran et al., 2017) so that they can be applied to Jacobi-GS and GS-Jacobi approaches. For GS-Jacobi, one block contains 15 rows of pixels on MNIST and 2 rows of pixels on CIFAR-10. For Jacobi-GS, one block has one row of pixels on both datasets. All results of wall-clock time are measured on a single NVIDIA Tesla V100 GPU with 32 GB memory. The batch sizes are 16 and 4 for MNIST and CIFAR-10 respectively.
Results. We report the performance of different samplers in Tab. 1, and include a visual comparison of Jacobi iteration vs. feedforward sampling (i.e., the standard ancestral sampling) in Fig. 2. Compared to standard feedforward computation (ancestral sampling) without caching, Jacobi, Jacobi-GS and GS-Jacobi all run significantly faster. Even against feedforward sampling with caching, our GS-Jacobi and Jacobi-GS methods still perform uniformly better. Specifically, GS-Jacobi yields 6.53 and 2.09 times speedup (on MNIST and CIFAR-10) compared to vanilla feedforward sampling without caching, and 4.42 and 1.20 times speedup against feedforward sampling with caching. Similarly, Jacobi-GS leads to speedup factors of 2.04 and 2.10 compared to vanilla feedforward sampling, and retains speedup factors of 1.38 and 1.20 against feedforward sampling with caching. Compared to GS-Jacobi, Jacobi-GS may require fewer parallel processing units. For example, Jacobi-GS requires only 28 parallel computing units on MNIST, since there are 28 blocks and each block needs a single device to run. In contrast, GS-Jacobi has a block size of $15 \times 28$ and requires that many parallel processing units. Our Jacobi method always outperforms vanilla feedforward sampling without caching, with speedup factors of 6.26 and 1.18 on MNIST and CIFAR-10 respectively. However, as demonstrated by our results on CIFAR-10 (see Tab. 1), Jacobi iterations may become slower than the hybrid methods, since the latter can exploit caching.

Table 1. Speedups for PixelCNN++ sampling on MNIST and CIFAR-10. Algorithms are stopped when the $\ell_{\infty}$ norm between the current sample and the ground-truth image is smaller than 0.01 (at which point the difference in samples is imperceptible to human eyes).

<table><tr><td rowspan="2">Method</td><td colspan="2">MNIST</td><td colspan="2">CIFAR-10</td></tr><tr><td>Time (s)</td><td>Speedup</td><td>Time (s)</td><td>Speedup</td></tr><tr><td>Feedforward w/o cache</td><td>12.15</td><td>1.00×</td><td>30.95</td><td>1.00×</td></tr><tr><td>Feedforward w/ cache</td><td>8.23</td><td>1.48×</td><td>17.76</td><td>1.74×</td></tr><tr><td>Jacobi</td><td>1.94</td><td>6.26×</td><td>26.16</td><td>1.18×</td></tr><tr><td>GS-Jacobi</td><td>1.86</td><td>6.53×</td><td>14.84</td><td>2.09×</td></tr><tr><td>Jacobi-GS</td><td>5.95</td><td>2.04×</td><td>14.76</td><td>2.10×</td></tr></table>

Figure 2. Feedforward (1st & 3rd rows) vs. Jacobi (2nd & 4th rows) sampling for PixelCNN++ on MNIST (top 2 rows) and CIFAR-10 (bottom 2 rows). Each column corresponds to the same number of updates. We show the first few intermediate samples on the left and the final image samples on the rightmost column.
# 6. Related Work
Accelerating feedforward computation in the context of autoregressive sampling has been studied in the literature. In particular, van den Oord et al. (2018) propose probability density distillation to distill information from a slow autoregressive model into a faster sampler. However, it may produce samples from a different distribution than the original (slower) autoregressive model. MintNet (Song et al., 2019) proposes a fixed-point iteration method based on Newton-Raphson to speed up the inversion of an autoregressive procedure, but it is limited to a particular model. Similar ideas have also been proposed as a theoretical possibility in (Naumov, 2017) without experimental verification.
Concurrently, Wiggers & Hoogeboom (2020) propose to accelerate autoregressive sampling with a fixed-point iteration method and demonstrate advantages over feedforward sampling (without caching) on PixelCNN++ models. Our Jacobi approach in Algorithm 1 is equivalent to theirs, but we additionally provide hybrid methods to improve the vanilla Jacobi approach, which are able to outperform feedforward sampling with caching. Our approaches are also more general, applicable to tasks beyond autoregressive sampling such as RNN training and DenseNet inference.
Common iterative solvers for linear equations include Jacobi, Gauss-Seidel, successive over-relaxation (SOR), and more general Krylov subspace methods. Forward/back substitution, as a process of solving lower/upper triangular linear systems, can also be viewed as an instance of feedforward computation. Many approaches have been proposed to accelerate and parallelize this procedure. Specifically, level scheduling (Saad, 2003) performs a topological sort to find independent groups of variables that can be solved in parallel. Block-Jacobi iteration methods (Anzt et al., 2015; Chow et al., 2018), similar to the Jacobi-GS method in our paper, are proposed to maximize parallel efficiency on GPUs.
Jacobi-type iterations are also used in message passing algorithms for probabilistic graphical models (Elidan et al., 2012; Niu et al., 2011) and graph neural networks (GNNs, Scarselli et al. (2008)). In particular, Gaussian belief propagation (GaBP) includes the Jacobi method as a special case (Bickson, 2008) when solving Gaussian Markov random fields. The core computation of GNNs is a parameterized message passing process where methods similar to block-Jacobi scheduling are popular (Liao et al., 2018).
# 7. Conclusion
By interpreting the feedforward computation as solving a triangular system of nonlinear equations, we show that numerical solvers can, in some cases, provide faster evaluation at the expense of additional parallel computing power.
In particular, we demonstrated that variants of Jacobi and Gauss-Seidel iterations are effective in accelerating the training of RNNs, the evaluation of DenseNets on ImageNet and the sampling from multiple deep autoregressive models, such as MADE and PixelCNN++, on several image datasets.
This observation opens up many new possible directions. We can build highly-optimized software packages to automatically parallelize some feedforward computation. More sophisticated numerical equation solving techniques, such as Krylov subspace methods and continuation methods, may provide greater acceleration than Jacobi or our hybrid methods. Our idea is particularly useful in time-critical applications, where trading parallel computing power for time is otherwise impossible.
Finally, we reiterate that our method is not beneficial for all feedforward computation. We require the process to tolerate numerical errors, to have long skip connections, and to have weak dependencies among its various sequential stages that can be leveraged by numerical solvers (see the discussions in Section 4). Moreover, in some cases it can be non-trivial for practical implementations to reap the benefits of acceleration that are possible in theory, due to various overheads in software or hardware.
# Acknowledgements
This research was supported by Intel Corporation, TRI, NSF (#1651565, #1522054, #1733686), ONR (N00014-19-1-2145), AFOSR (FA9550-19-1-0024). Yang Song was supported by the Apple PhD Fellowship in AI/ML.
# References
Abadi, M., Agarwal, A., Barham, P., Brevdo, E., Chen, Z., Citro, C., Corrado, G. S., Davis, A., Dean, J., Devin, M., Ghemawat, S., Goodfellow, I., Harp, A., Irving, G., Isard, M., Jia, Y., Jozefowicz, R., Kaiser, L., Kudlur, M., Levenberg, J., Mané, D., Monga, R., Moore, S., Murray, D., Olah, C., Schuster, M., Shlens, J., Steiner, B., Sutskever, I., Talwar, K., Tucker, P., Vanhoucke, V., Vasudevan, V., Viégas, F., Vinyals, O., Warden, P., Wattenberg, M., Wicke, M., Yu, Y., and Zheng, X. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL https://www.tensorflow.org/. Software available from tensorflow.org.

Anzt, H., Chow, E., and Dongarra, J. Iterative sparse triangular solves for preconditioning. In European Conference on Parallel Processing, pp. 650-661. Springer, 2015.

Anzt, H., Chow, E., Szyld, D. B., and Dongarra, J. Domain overlap for iterative sparse triangular solves on GPUs. In Software for Exascale Computing-SPPEXA 2013-2015, pp. 527-545. Springer, 2016.

Bickson, D. Gaussian belief propagation: Theory and application. arXiv preprint arXiv:0811.2518, 2008.

Bradbury, J., Frostig, R., Hawkins, P., Johnson, M. J., Leary, C., Maclaurin, D., Necula, G., Paszke, A., VanderPlas, J., Wanderman-Milne, S., and Zhang, Q. JAX: composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax.

Chow, E., Anzt, H., Scott, J., and Dongarra, J. Using Jacobi iterations and blocking for solving sparse triangular systems in incomplete factorization preconditioning. Journal of Parallel and Distributed Computing, 119:219-230, 2018.

Elidan, G., McGraw, I., and Koller, D. Residual belief propagation: Informed scheduling for asynchronous message passing. arXiv preprint arXiv:1206.6837, 2012.

Germain, M., Gregor, K., Murray, I., and Larochelle, H. MADE: Masked autoencoder for distribution estimation. In International Conference on Machine Learning, pp. 881-889, 2015.

He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.

Heek, J., Levskaya, A., Oliver, A., Ritter, M., Rondepierre, B., Steiner, A., and van Zee, M. Flax: A neural network library and ecosystem for JAX, 2020. URL http://github.com/google/flax.

Huang, G., Liu, Z., Van Der Maaten, L., and Weinberger, K. Q. Densely connected convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4700-4708, 2017.

Kelley Jr, J. E. and Walker, M. R. Critical-path planning and scheduling. In Papers Presented at the December 1-3, 1959, Eastern Joint IRE-AIEE-ACM Computer Conference, pp. 160-173, 1959.

Kingma, D. P., Salimans, T., Jozefowicz, R., Chen, X., Sutskever, I., and Welling, M. Improved variational inference with inverse autoregressive flow. In Advances in Neural Information Processing Systems, pp. 4743-4751, 2016.

Krizhevsky, A., Hinton, G., et al. Learning multiple layers of features from tiny images. 2009.

LeCun, Y. and Cortes, C. MNIST handwritten digit database. 2010. URL http://yann.lecun.com/exdb/mnist/.

Liao, R., Brockschmidt, M., Tarlow, D., Gaunt, A. L., Urtasun, R., and Zemel, R. Graph partition neural networks for semi-supervised classification. arXiv preprint arXiv:1803.06272, 2018.

Naumov, M. Parallel complexity of forward and backward propagation. arXiv preprint arXiv:1712.06577, 2017.

Niu, F., Ré, C., Doan, A., and Shavlik, J. Tuffy: Scaling up statistical inference in Markov logic networks using an RDBMS. Proceedings of the VLDB Endowment, 4(6), 2011.

Ortega, J. M. and Rheinboldt, W. C. Iterative Solution of Nonlinear Equations in Several Variables, volume 30. SIAM, 1970.

Papamakarios, G., Pavlakou, T., and Murray, I. Masked autoregressive flow for density estimation. In Advances in Neural Information Processing Systems, pp. 2338-2347, 2017.

Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems, pp. 8024-8035, 2019.

Ramachandran, P., Paine, T. L., Khorrami, P., Babaeizadeh, M., Chang, S., Zhang, Y., Hasegawa-Johnson, M. A., Campbell, R. H., and Huang, T. S. Fast generation for convolutional autoregressive models. arXiv preprint arXiv:1704.06001, 2017.

Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., Berg, A. C., and Fei-Fei, L. ImageNet large scale visual recognition challenge. International Journal of Computer Vision (IJCV), 115(3):211-252, 2015. doi: 10.1007/s11263-015-0816-y.

Saad, Y. Iterative Methods for Sparse Linear Systems, volume 82. SIAM, 2003.

Salimans, T., Karpathy, A., Chen, X., and Kingma, D. P. PixelCNN++: Improving the PixelCNN with discretized logistic mixture likelihood and other modifications. arXiv preprint arXiv:1701.05517, 2017.

Scarselli, F., Gori, M., Tsoi, A. C., Hagenbuchner, M., and Monfardini, G. The graph neural network model. IEEE Transactions on Neural Networks, 20(1):61-80, 2008.

Song, Y., Meng, C., and Ermon, S. MintNet: Building invertible neural networks with masked convolutions. In Advances in Neural Information Processing Systems, pp. 11002-11012, 2019.

van den Oord, A., Li, Y., Babuschkin, I., Simonyan, K., Vinyals, O., Kavukcuoglu, K., Driessche, G., Lockhart, E., Cobo, L., Stimberg, F., et al. Parallel WaveNet: Fast high-fidelity speech synthesis. In International Conference on Machine Learning, pp. 3918-3926, 2018.

Wiggers, A. and Hoogeboom, E. Predictive sampling with forecasting autoregressive models. In International Conference on Machine Learning, pp. 10260-10269. PMLR, 2020.

Young, D. M. Iterative Solution of Large Linear Systems. Elsevier, 2014.
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c76d581a8cc4f5e7fda6326de35ef394cde76e891ef82a12b71dd6defc2a09e3
size 188628
acceleratingfeedforwardcomputationviaparallelnonlinearequationsolving/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5413370ea96e9eefb9d7e35b58e72f23cfd0112fb8e62544041ef8349fda9197
size 422408
acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:206d8d0407ff067fb385a96aa43aef4e788de94f0384603902ffe6bf32d1ee09
size 95981
acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea6545e881be1c2acd07bb798b725208447bddb3f83f24baebfb9e4250d372df
size 116490
acceleratinggossipsgdwithperiodicglobalaveraging/a88e4acc-b2d7-4bd2-a331-f7ac91a34d5e_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb55e9f15d38c46788f4a7da7fc3ecfed17f9f647b8ba99a9402fa135f3b1300
size 1255542
acceleratinggossipsgdwithperiodicglobalaveraging/full.md
ADDED
@@ -0,0 +1,414 @@
# Accelerating Gossip SGD with Periodic Global Averaging
Yiming Chen\*1, Kun Yuan\*1, Yingya Zhang1, Pan Pan1, Yinghui Xu1, Wotao Yin
# Abstract

Communication overhead hinders the scalability of large-scale distributed training. Gossip SGD, where each node averages only with its neighbors, is more communication-efficient than the prevalent parallel SGD. However, its convergence rate is inversely proportional to the quantity $1 - \beta$, which measures the network connectivity. On large and sparse networks where $1 - \beta \rightarrow 0$, Gossip SGD requires more iterations to converge, which offsets its communication benefit. This paper introduces Gossip-PGA, which adds Periodic Global Averaging into Gossip SGD. Its transient stage, i.e., the number of iterations required to reach the asymptotic linear-speedup stage, improves from $\Omega (\beta^4 n^3 /(1 - \beta)^4)$ to $\Omega (\beta^4 n^3 H^4)$ for non-convex problems. The influence of network topology in Gossip-PGA can be controlled by the averaging period $H$. Its transient-stage complexity is also superior to that of Local SGD, which has order $\Omega (n^{3}H^{4})$. Empirical results of large-scale training on image classification (ResNet50) and language modeling (BERT) validate our theoretical findings.
# 1. Introduction
The scale of deep learning nowadays calls for efficient large-scale distributed training across multiple computing nodes in the data-center clusters. In distributed optimization, a network of $n$ nodes cooperate to solve the problem
$$
\min_{x \in \mathbb{R}^d} \frac{1}{n} \sum_{i=1}^{n} \left[ f_i(x) := \mathbb{E}_{\boldsymbol{\xi}_i \sim D_i} F_i(x; \boldsymbol{\xi}_i) \right] \tag{1}
$$
where each component $f_{i}$ is local and private to node $i$ and the random variable $\pmb{\xi}_{i}$ denotes the local data that follows distribution $D_{i}$ . We assume each node $i$ can locally evaluate stochastic gradients $\nabla F_{i}(x;\pmb{\xi}_{i})$ where $\pmb{\xi}_{i} \sim D_{i}$ , but must communicate to access information from other nodes.
*Equal contribution 1 Alibaba Group, Hangzhou, China. Correspondence to: Kun Yuan <kun.yuan@alibaba-inc.com>.
Proceedings of the $38^{th}$ International Conference on Machine Learning, PMLR 139, 2021. Copyright 2021 by the author(s).
<table><tr><td>METHOD</td><td>EPOCH</td><td>ACC.%</td><td>TIME(HRS.)</td></tr><tr><td>PARALLEL SGD</td><td>120</td><td>76.26</td><td>2.22</td></tr><tr><td>GOSSIP SGD (RING)</td><td>120</td><td>74.86</td><td>1.56</td></tr><tr><td>GOSSIP SGD (EXPO)</td><td>120</td><td>75.34</td><td>1.55</td></tr><tr><td>GOSSIP SGD (RING)</td><td>240</td><td>75.62</td><td>3.02</td></tr><tr><td>GOSSIP SGD (EXPO)</td><td>240</td><td>76.18</td><td>3.03</td></tr></table>
Table 1. Top-1 validation accuracy for ImageNet with 256 GPUs connected with the ring or one-peer exponential network. Gossip SGD takes more time to reach the same accuracy as Parallel SGD.
Parallel SGD methods are leading algorithms to solve (1), in which every node processes local training samples independently, and synchronize gradients every iteration either using a central Parameter Server (PS) (Li et al., 2014) or the All-Reduce communication primitive (Patarasuk & Yuan, 2009). The global synchronization in Parallel SGD either incurs significant bandwidth cost or high latency, which hampers the training scalability.
Many alternative methods have been proposed to reduce the communication overhead in distributed training. Gossip SGD, also known as decentralized SGD (Nedic & Ozdaglar, 2009; Chen & Sayed, 2012; Lian et al., 2017; 2018; Assran et al., 2019), has recently received a lot of attention. This line of work lets each node communicate with (some of) its direct neighbors. In a sparse topology such as the one-peer exponential graph (Assran et al., 2019), each node communicates with only one neighbor each time. This gossip-style communication is much faster than PS and All-Reduce, but the computed average can be highly inaccurate. Local SGD (Stich, 2019; Yu et al., 2019; Lin et al., 2018) is another line of work that increases the computation-to-communication ratio. Local SGD lets each node run local gradient descent for multiple rounds and average parameters globally only once in a while. By communicating less frequently, Local SGD reduces the communication overhead.
The reduced communication in Gossip and Local SGD comes at a cost: a slower convergence rate. While both algorithms are proven to reach linear speedup asymptotically, they are sensitive to the network topology and the synchronization period, respectively. For Gossip SGD, the convergence rate is inversely proportional to $1 - \beta$ ($\beta$ is defined in Remark 1). Since $\beta \rightarrow 1$ on the large and sparse network topologies that are most valuable for deep training, Gossip SGD converges very slowly and requires more iterations than Parallel SGD to achieve a desired solution. This may nullify its communication efficiency and result in even more training time (see Table 1). Local SGD with a large averaging period meets the same issue.

<table><tr><td rowspan="2"></td><td colspan="2">GOSSIP SGD</td><td colspan="2">GOSSIP-PGA</td></tr><tr><td>IID</td><td>NON-IID</td><td>IID</td><td>NON-IID (PROPOSED)</td></tr><tr><td>SMALL OR DENSE NETWORK (WHEN 1/(1-β) < H)</td><td>Ω(n^3β^4/(1-β)^2)</td><td>Ω(n^3β^4/(1-β)^4)</td><td>Ω(n^3β^4C_β^2)</td><td>Ω(n^3β^4C_β^2/(1-β)^2)</td></tr><tr><td>LARGE OR SPARSE NETWORK (WHEN 1/(1-β) ≥ H)</td><td>Ω(n^3β^4/(1-β)^2)</td><td>Ω(n^3β^4/(1-β)^4)</td><td>Ω(n^3β^4C_β^2)</td><td>Ω(n^3β^4C_β^2H^2)</td></tr></table>

Table 2. The lengths of the transient stages of Gossip SGD and Gossip-PGA. Since $C_{\beta} = \sum_{k=0}^{H-1} \beta^k = (1 - \beta^H) / (1 - \beta) < \min \{1 / (1 - \beta), H\}$, Gossip-PGA always has a shorter transient stage, which is more evident on large and sparse networks where $\beta \rightarrow 1$.

<table><tr><td></td><td>LOCAL SGD</td><td>GOSSIP-PGA</td></tr><tr><td>IID SCENARIO</td><td>Ω(n^3H^2)</td><td>Ω(n^3β^4C_β^2)</td></tr><tr><td>NON-IID SCENARIO</td><td>Ω(n^3H^4)</td><td>Ω(n^3β^4C_β^2H^2)</td></tr></table>

Table 3. The lengths of the transient stages of Local SGD and Gossip-PGA. Gossip-PGA always has a shorter transient stage than Local SGD since $\beta < 1$ and $C_{\beta} < H$. Such superiority becomes more significant on well-connected networks where $\beta \rightarrow 0$.
This paper proposes Gossip-PGA, which adds periodic All-Reduce global averaging into Gossip SGD to accelerate its convergence, especially on large and sparse networks. Gossip-PGA also extends Local SGD with fast gossip-style communication after local updates. When the same averaging period $H$ is used, the additional gossip communication in Gossip-PGA endows it with faster convergence than Local SGD.
Challenges. Gossip-PGA can be regarded as a special form of topology-changing Gossip SGD (Koloskova et al., 2020) and of SlowMo (Wang et al., 2019) (with the base optimizer set to Gossip SGD and the momentum coefficient set to zero). However, its theory and practical performance have not been carefully investigated in the literature. Important unanswered questions include how much acceleration PGA can bring to Gossip and Local SGD, in which scenarios PGA benefits most, how to adjust the averaging period effectively, and how Gossip-PGA performs in large-scale deep learning systems. Providing quantitative answers to these questions requires a new understanding of the interplay between gossip communication and the global averaging period. Simply following the existing analysis in (Koloskova et al., 2020) would result in incomplete conclusions, see Remark 5. Also, the analysis in SlowMo (Wang et al., 2019) does not consider heterogeneous data distributions and cannot cover our results.
# 1.1. Main Results
This paper proves that Gossip-PGA converges at
$$
O\left( \underbrace{\frac{\sigma}{\sqrt{nT}}}_{\text{SGD rate}} + \underbrace{\frac{C_\beta^{\frac{1}{3}} \beta^{\frac{2}{3}} \big( \sigma^{\frac{2}{3}} + D_\beta^{\frac{1}{3}} b^{\frac{2}{3}} \big)}{T^{\frac{2}{3}}} + \frac{\beta D_\beta}{T}}_{\text{extra overhead}} \right) \tag{2}
$$
for both smooth convex and non-convex functions $f_{i}$ (see Theorems 1 and 2 for the exact metrics in each scenario), where $n$ is the network size, $T$ is the total number of iterations, $\sigma^2$ denotes the gradient noise, $b^{2}$ gauges the data heterogeneity, $\beta \in (0,1)$ measures how well the network is connected, $H$ is the global averaging period, and we define $C_\beta = \sum_{k=0}^{H-1} \beta^k$ and $D_\beta = \min \{H, 1 / (1 - \beta)\}$.
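For intuition, the quantities in (2) and the resulting transient-stage lengths are easy to tabulate numerically; the sketch below, with illustrative values of $n$, $\beta$ and $H$, is not part of the paper's experiments.

```python
def transient_iters_pga(n, beta, H):
    # Non-iid Gossip-PGA transient stage: Omega(n^3 beta^4 C^2 D^2), with
    # C = sum_{k<H} beta^k and D = min(H, 1/(1-beta)).
    C = (1 - beta**H) / (1 - beta)
    D = min(H, 1.0 / (1 - beta))
    return n**3 * beta**4 * C**2 * D**2

n, beta, H = 256, 0.999, 4        # a large, sparse topology: 1 - beta -> 0
print(transient_iters_pga(n, beta, H))     # stays controlled by H, since C < H
print(n**3 * beta**4 / (1 - beta)**4)      # Gossip SGD (non-iid) blows up
```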
Linear speedup. When $T$ is sufficiently large, the first term $1 / \sqrt{nT}$ dominates (2). This also applies to Parallel, Local, and Gossip SGD. Gossip-PGA and these algorithms all require $T = \Omega(1/(n\epsilon^2))$ iterations to reach a desired accuracy $\epsilon$, which is inversely proportional to $n$. We say an algorithm is in its linear-speedup stage at the $T$-th iteration if, for this $T$, the term involving $nT$ dominates the rate.
Transient stage. The transient stage refers to the iterations before an algorithm reaches its linear-speedup stage, that is, iterations $1,\ldots ,T$ where $T$ is still small enough that the non-$nT$ terms (i.e., the extra overhead terms in (2)) dominate the rate. Take Gossip-PGA in the non-iid scenario ($b^{2 / 3}\geq \sigma$) as an example. To reach linear speedup, $T$ has to satisfy $T^{\frac{2}{3}} / (C_{\beta}^{\frac{1}{3}}\beta^{\frac{2}{3}}D_{\beta}^{\frac{1}{3}})\geq n^{\frac{1}{2}}T^{\frac{1}{2}}$, i.e., $T\geq n^{3}\beta^{4}C_{\beta}^{2}D_{\beta}^{2}$. So the transient stage has $\Omega (n^{3}\beta^{4}C_{\beta}^{2}D_{\beta}^{2})$ iterations. The transient stage is an important metric to measure the scalability of distributed algorithms.
Shorter transient stage than Gossip SGD. The transient stage comparison between Gossip SGD and Gossip-PGA is shown in Table 2. Since $C_{\beta} = (1 - \beta^{H}) / (1 - \beta) < \min \{H, 1 / (1 - \beta)\}$, we conclude that Gossip-PGA always has a shorter transient stage than Gossip SGD, for any $\beta$ and $H$. Moreover, the superiority of Gossip-PGA becomes evident when the network is large and sparse, i.e., $1 - \beta \rightarrow 0$. In this case, the transient stage of Gossip SGD can grow dramatically (see the second line in Table 2) while that of Gossip-PGA is controlled by the global period $H$ because $C_{\beta} < H$. As a result, Gossip-PGA improves the transient stage of Gossip SGD from $O(n^{3} / (1 - \beta)^{4})$ (or $O(n^{3} / (1 - \beta)^{2})$ in the iid scenario) to $O(n^{3})$ (for a fixed period $H$) as $\beta \rightarrow 1$.
Shorter transient stage than Local SGD. The transient stage comparison between Local SGD and Gossip-PGA is shown in Table 3. Using $C_{\beta} < H$, we find that Gossip-PGA always has a shorter transient stage than Local SGD. Moreover, when the network is well-connected, such that $\beta \rightarrow 0$, it holds that $C_{\beta} \rightarrow 1$, and Gossip-PGA will have a significantly shorter transient stage than Local SGD.
# 1.2. Contributions
- We establish the convergence rate of Gossip-PGA for both smooth convex and non-convex problems. Our results clarify how gossip communication and periodic global averaging collaborate to improve the transient stages of Gossip and Local SGD. We also establish the shorter wall-clock training time of Gossip-PGA.
- We propose Gossip-AGA, which has adaptive global averaging periods. Gossip-AGA automatically adjusts $H$ and has convergence guarantees.
- We conduct various experiments (convex logistic regression and large-scale deep learning tasks) to validate all established theoretical results. In particular, the proposed Gossip-PGA/AGA achieves a similar convergence speed to parallel SGD in iterations, but provides $1.3 \sim 1.9 \times$ runtime speed-up. The introduced global averaging steps in Gossip-PGA/AGA remedy the accuracy degradation in Gossip SGD and Local SGD.
# 2. Related Work
Decentralized optimization algorithms can be traced back to (Tsitsiklis et al., 1986). Since then, decentralized optimization has been intensively studied in the signal processing and control communities. Decentralized gradient descent (DGD) (Nedic & Ozdaglar, 2009), diffusion (Chen & Sayed, 2012) and dual averaging (Duchi et al., 2011) are among the first decentralized algorithms that target general optimization problems. However, these algorithms suffer from a bias caused by data heterogeneity (Yuan et al., 2016). Various primal-dual algorithms have been proposed to overcome this issue; they are based on the alternating direction method of multipliers (ADMM) (Shi et al., 2014), explicit bias-correction (Shi et al., 2015; Yuan et al., 2019; Li et al., 2019c), gradient tracking (Xu et al., 2015; Di Lorenzo & Scutari, 2016; Nedic et al., 2017; Qu & Li, 2018), coordinate-descent methods (He et al., 2018), and dual acceleration (Scaman et al., 2017; Uribe et al., 2020).
In the context of machine learning, decentralized SGD, also known as Gossip SGD, has gained a lot of attention recently. (Lian et al., 2017) first proved that Gossip SGD can reach the same linear speedup as vanilla parallel SGD. After that, (Assran et al., 2019) extended Gossip SGD to directed topologies. A recent work (Koloskova et al., 2020) proposes a unified framework to analyze algorithms with changing topologies and local updates. While it covers Gossip-PGA as a special form, the theoretical and practical benefits of periodic global averaging were not studied therein. The data heterogeneity issue suffered by Gossip SGD is discussed and addressed in (Tang et al., 2018; Yuan et al., 2020; Lu et al., 2019; Xin et al., 2020). Gossip SGD is also extended to asynchronous scenarios in (Lian et al., 2018; Luo et al., 2020).
Local SGD can be traced back to (Zinkevich et al., 2010), which proposed one-shot averaging. A more frequent averaging strategy is proposed in (Zhang et al., 2016), and the convergence properties of Local SGD are established in (Yu et al., 2019; Stich, 2019; Bayoumi et al., 2020). Local SGD is also widely used in federated learning (McMahan et al., 2017; Li et al., 2019a).
Another closely related work (Wang et al., 2019) proposes a slow momentum (SlowMo) framework, where each node, similar to the Gossip-PGA algorithm proposed in this paper, periodically synchronizes across the network and performs a momentum update. The analysis in SlowMo cannot cover the convergence results in this paper due to its data-homogeneous setting. In addition, we clarify some new questions, such as how much acceleration PGA can bring to Gossip and Local SGD, and how to adjust the averaging period effectively.
Various techniques can be integrated into Gossip SGD to improve its communication efficiency. This paper does not consider quantization (Alistarh et al., 2017; Bernstein et al., 2018), gradient compression (Tang et al., 2019; Koloskova et al., 2019b;a) or lazy communication (Chen et al., 2018; Liu et al., 2019), but these orthogonal techniques can be added to our methods.

# 3. Gossip SGD with Periodic Global Averaging
Assume all computing nodes are connected over a graph $\mathcal{G} = \{\mathcal{V},\mathcal{E}\}$, where $\mathcal{V} = \{1,2,\dots ,n\}$ denotes the node indices and $\mathcal{E}$ denotes the communication links between nodes. Similar to existing decentralized algorithms (Nedic & Ozdaglar, 2009; Chen & Sayed, 2012; Lian et al., 2017; Assran et al., 2019), information exchange in the gossip step is only allowed to occur between connected neighbors. To characterize the decentralized communication, we let $W\in \mathbb{R}^{n\times n}$ be a doubly stochastic matrix, i.e., $W\geq 0$, $W\mathbb{1}_n = \mathbb{1}_n$ and $\mathbb{1}_n^T W = \mathbb{1}_n^T$. The $(i,j)$-th element $w_{ij}$ is the weight that scales information flowing from node $j$ to node $i$. If nodes $i$ and $j$ are not neighbors then $w_{ij} = 0$, and if they are neighbors or identical then $w_{ij} > 0$. Furthermore, we define $\mathcal{N}_i$ as the set of neighbors of node $i$, which includes node $i$ itself.
The Gossip-PGA algorithm is listed in Algorithm 1. In the gossip step, every node $i$ collects information from all its connected neighbors. In the global averaging step, nodes synchronize their model parameters using the efficient All-Reduce primitive. When $H \to \infty$, Gossip-PGA reduces to standard Gossip SGD; when $W = \frac{1}{n}\mathbb{1}_n\mathbb{1}_n^T$, Gossip-PGA reduces to vanilla parallel SGD; when $W = I$, Gossip-PGA reduces to Local SGD.

# Algorithm 1 Gossip-PGA

Require: learning rate $\gamma > 0$, weight matrix $W$, global averaging period $H$; initialize all $\boldsymbol{x}_i^{(0)}$ to be equal to each other.

for $k = 0,1,2,\ldots ,T - 1$, every node $i$ do:

$$
\text{Sample } \boldsymbol{\xi}_i^{(k+1)}, \text{ update } \boldsymbol{g}_i^{(k)} = \nabla F_i(\boldsymbol{x}_i^{(k)}; \boldsymbol{\xi}_i^{(k+1)})
$$

$$
\boldsymbol{x}_i^{(k+\frac{1}{2})} = \boldsymbol{x}_i^{(k)} - \gamma\, \boldsymbol{g}_i^{(k)} \quad \triangleright\ \text{local SGD update}
$$

if $\operatorname{mod}(k+1, H) = 0$ then

$$
\boldsymbol{x}_i^{(k+1)} = \frac{1}{n} \sum_{j=1}^{n} \boldsymbol{x}_j^{(k+\frac{1}{2})} \quad \triangleright\ \text{global average}
$$

else

$$
\boldsymbol{x}_i^{(k+1)} = \sum_{j \in \mathcal{N}_i} w_{ij}\, \boldsymbol{x}_j^{(k+\frac{1}{2})} \quad \triangleright\ \text{one gossip step}
$$
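A single-process simulation of Algorithm 1 can be written in a few lines; `grad` below is an assumed callable returning the stacked stochastic gradients, and the All-Reduce is emulated with a mean. This is a sketch, not the distributed implementation used in our experiments.

```python
import numpy as np

def gossip_pga(grad, x0, W, gamma, H, T):
    # x0: (n, d) initial models (identical rows); W: (n, n) doubly stochastic
    # gossip matrix; grad(x, k): (n, d) stochastic gradients at iteration k.
    n = x0.shape[0]
    x = x0.copy()
    for k in range(T):
        x_half = x - gamma * grad(x, k)                   # local SGD update
        if (k + 1) % H == 0:
            x = np.tile(x_half.mean(axis=0), (n, 1))      # global average (All-Reduce)
        else:
            x = W @ x_half                                # one gossip step
    return x
```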
All-Reduce vs. multiple gossips. In a computing cluster with $n$ nodes, global averaging is typically conducted in an efficient Ring All-Reduce manner, rather than via multiple gossip steps as in (Berahas et al., 2018). The communication time comparison between a single gossip step and a Ring All-Reduce step is listed in Appendix H. In the one-peer exponential network, the exact global average can be achieved via $\ln(n)$ gossip communications, which generally takes more wall-clock time than a single Ring All-Reduce operation. Therefore, we recommend exploiting All-Reduce to conduct the global averaging in Gossip-PGA.
Data-center vs. wireless network. This paper considers deep training within high-performance data-center clusters, in which all GPUs are connected with high-bandwidth channels and the network topology can be fully controlled. Under such a setting, the periodic global averaging conducted with Ring All-Reduce has a tolerable communication cost, see Appendix H. For scenarios where global averaging is extremely expensive, such as in wireless sensor networks, the global average can be approximated via multiple gossip steps, or may not be recommended at all.
# 3.1. Assumptions and analysis highlights
We now establish convergence rates for Gossip-PGA on smooth convex and non-convex problems. For all our theoretical results we make the following standard assumptions.
Assumption 1 ( $L$ -SMOOTHNESS). Each local cost function $f_{i}(x)$ is differentiable, and there exists a constant $L$ such that for each $\mathbf{x}, \mathbf{y} \in \mathbb{R}^{d}$ :
$$
\| \nabla f_i(\boldsymbol{x}) - \nabla f_i(\boldsymbol{y}) \| \leq L \| \boldsymbol{x} - \boldsymbol{y} \|. \tag{3}
$$
Assumption 2 (GRADIENT NOISE). Recall that $\pmb{g}_i^{(k)}$ is the stochastic gradient defined in Algorithm 1. It is assumed for any $k$ and $i$ that
$$
\mathbb{E}\left[ \boldsymbol{g}_i^{(k)} - \nabla f_i\big(\boldsymbol{x}_i^{(k)}\big) \,\middle|\, \mathcal{F}^{(k-1)} \right] = 0, \tag{4}
$$

$$
\mathbb{E}\left[ \big\| \boldsymbol{g}_i^{(k)} - \nabla f_i\big(\boldsymbol{x}_i^{(k)}\big) \big\|^2 \,\middle|\, \mathcal{F}^{(k-1)} \right] \leq \sigma^2 \tag{5}
$$
for some constant $\sigma^2 > 0$. Moreover, we assume the $\pmb{\xi}_i^{(k)}$ are independent of each other for all $k$ and $i$. The filtration is defined as $\mathcal{F}^{(k)} = \{\{\pmb{x}_i^{(k)}\}_{i=1}^n, \{\pmb{\xi}_i^{(k)}\}_{i=1}^n, \dots, \{\pmb{x}_i^{(0)}\}_{i=1}^n, \{\pmb{\xi}_i^{(0)}\}_{i=1}^n\}$.
Assumption 3 (WEIGHTING MATRIX). The network is strongly connected and the weight matrix $W$ satisfies $W\mathbb{1}_n = \mathbb{1}_n$ , $\mathbb{1}_n^T W = \mathbb{1}_n^T$ , $\mathrm{null}(I - W) = \mathrm{span}(\mathbb{1}_n)$ . We also assume $\| W - \frac{1}{n}\mathbb{1}\mathbb{1}^T\|_2 \leq \beta$ for some $\beta \in (0,1)$ .
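For example, the standard ring topology with uniform $1/3$ weights satisfies Assumption 3, and its $\beta$ can be computed directly; this snippet is purely illustrative.

```python
import numpy as np

def ring_matrix(n):
    # Doubly stochastic weights for a ring: each node averages itself and
    # its two neighbors with weight 1/3.
    W = np.zeros((n, n))
    for i in range(n):
        W[i, i] = W[i, (i - 1) % n] = W[i, (i + 1) % n] = 1 / 3
    return W

for n in (8, 16, 64):
    W = ring_matrix(n)
    beta = np.linalg.norm(W - np.ones((n, n)) / n, 2)   # || W - (1/n) 1 1^T ||_2
    print(n, round(beta, 4))   # beta -> 1 as the ring grows, i.e., 1 - beta -> 0
```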
Remark 1. The quantity $\beta \in (0,1)$ indicates how well the topology is connected. A smaller $\beta$ indicates a better-connected network, while a larger $\beta$ implies a more poorly connected topology.
Analysis highlights. To characterize the influence of periodic global averaging, we must exploit all of the useful structural properties of the algorithm in order to establish its superiority. These properties are:
- $\pmb{x}_i^{(k)} = \bar{\pmb{x}}^{(k)}$ when mod $(k, H) = 0$ . This structure relieves the influence of network topology;
- Gossip communications within each period also contribute to consensus among nodes. This structure is crucial to establish superiority to Local SGD;
- When the network is large and sparse, i.e., $H < \frac{1}{1 - \beta}$, the global averaging is more critical to drive consensus. This structure is crucial to establish superiority over Gossip SGD when $H < \frac{1}{1 - \beta}$.
- When the network is small or dense, i.e., $H > \frac{1}{1 - \beta}$, gossip communication is more critical to drive consensus. This structure is crucial to establish superiority over Gossip SGD when $H > \frac{1}{1 - \beta}$.
Ignoring any of the above structures in the analysis would result in incomplete conclusions when comparing Gossip-PGA, Gossip SGD and Local SGD.
# 3.2. Convergence analysis: convex scenario
Assumption 4 (CONVEXITY). Each $f_{i}(x)$ is convex.
Definition 1 (DATA HETEROGENEITY). When each $f_{i}(x)$ is convex, we let $b^{2} = \frac{1}{n}\sum_{i = 1}^{n}\| \nabla f_{i}(x^{\star})\|^{2}$ denote the data heterogeneity.
When each local dataset follows the same distribution, it holds that $f_{i}(x) = f(x)$ for all $i$, and hence $\nabla f_{i}(x^{\star}) = \nabla f(x^{\star}) = 0$, which also implies $b^{2} = 0$. With Assumption 4, we let $x^{\star}$ be one of the global solutions to problem (1).
<table><tr><td></td><td>GOSSIP SGD (KOLOSKOVA ET AL., 2020)</td><td>GOSSIP-PGA</td></tr><tr><td>RATES (GENERAL FORM)</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{\beta^{2/3}\sigma^{2/3}}{(1-\beta)^{1/3}T^{2/3}} + \frac{\beta^{2/3}b^{2/3}}{(1-\beta)^{2/3}T^{2/3}} + \frac{\beta}{(1-\beta)T}\right)$</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{C_\beta^{1/3}\beta^{2/3}\sigma^{2/3}}{T^{2/3}} + \frac{C_\beta^{1/3}D_\beta^{1/3}\beta^{2/3}b^{2/3}}{T^{2/3}} + \frac{\beta D_\beta}{T}\right)$</td></tr><tr><td>RATES (WHEN $1/(1-\beta) < H$)</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{\beta^{2/3}\sigma^{2/3}}{(1-\beta)^{1/3}T^{2/3}} + \frac{\beta^{2/3}b^{2/3}}{(1-\beta)^{2/3}T^{2/3}} + \frac{\beta}{(1-\beta)T}\right)$</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{C_\beta^{1/3}\beta^{2/3}\sigma^{2/3}}{T^{2/3}} + \frac{C_\beta^{1/3}\beta^{2/3}b^{2/3}}{(1-\beta)^{1/3}T^{2/3}} + \frac{\beta}{(1-\beta)T}\right)$</td></tr><tr><td>RATES (WHEN $1/(1-\beta) \geq H$)</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{\beta^{2/3}\sigma^{2/3}}{(1-\beta)^{1/3}T^{2/3}} + \frac{\beta^{2/3}b^{2/3}}{(1-\beta)^{2/3}T^{2/3}} + \frac{\beta}{(1-\beta)T}\right)$</td><td>$O\left(\frac{\sigma}{\sqrt{nT}} + \frac{C_\beta^{1/3}\beta^{2/3}\sigma^{2/3}}{T^{2/3}} + \frac{C_\beta^{1/3}H^{1/3}\beta^{2/3}b^{2/3}}{T^{2/3}} + \frac{\beta H}{T}\right)$</td></tr></table>

Table 4. Convergence rate comparison between Gossip SGD and Gossip-PGA for smooth convex/non-convex problems. We use the notation $b^{2}$ to indicate the data heterogeneity for both the convex and non-convex scenarios.
|
| 173 |
+
|
| 174 |
+
Theorem 1. Under Assumptions 1-4, if $\gamma$ is chosen as
|
| 175 |
+
|
| 176 |
+
$$
|
| 177 |
+
\gamma = \min \left\{ \frac{1}{12\beta L D_{\beta}},\ \left(\frac{r_{0}}{r_{1}(T+1)}\right)^{\frac{1}{2}},\ \left(\frac{r_{0}}{r_{2}(T+1)}\right)^{\frac{1}{3}} \right\} \tag{6}
|
| 178 |
+
$$
|
| 179 |
+
|
| 180 |
+
with constants $r_0 = 2\mathbb{E}\| \bar{\pmb{x}}^{(0)} - x^{\star}\|^{2}$, $r_{1} = 2\sigma^{2}/n$, and $r_2 = 6L\beta^2 C_\beta \sigma^2 + 18L\beta^2 C_\beta D_\beta b^2$, it holds for any $T$ that
|
| 181 |
+
|
| 182 |
+
$$
|
| 183 |
+
\mathbb{E} f(\hat{\boldsymbol{x}}^{(T)}) - f(x^{\star}) = O\left(\frac{\sigma}{\sqrt{nT}} + \frac{C_{\beta}^{\frac{1}{3}}\beta^{\frac{2}{3}}\left(\sigma^{\frac{2}{3}} + D_{\beta}^{\frac{1}{3}}b^{\frac{2}{3}}\right)}{T^{\frac{2}{3}}} + \frac{\beta D_{\beta}}{T}\right) \tag{7}
|
| 184 |
+
$$
|
| 185 |
+
|
| 186 |
+
where $\bar{\pmb{x}}^{(k)} = \frac{1}{n}\sum_{i = 1}^{n}\pmb{x}_{i}^{(k)},\hat{\pmb{x}}^{(T)} = \frac{1}{T + 1}\sum_{k = 0}^{T}\bar{\pmb{x}}^{(k)},$ $C_\beta = \sum_{k = 0}^{H - 1}\beta^k$ and $D_{\beta} = \min \{H,1 / (1 - \beta)\}$ . (Proof is in Appendix B.)
|
| 187 |
+
|
| 188 |
+
Remark 2. When $\beta \to 0$, i.e., the network tends to be fully connected, Gossip-PGA converges at rate $O(\sigma / \sqrt{nT})$, which recovers the rate of parallel SGD.
|
| 189 |
+
|
| 190 |
+
Remark 3. When $\beta \to 1$, i.e., information exchange via gossip communication is inefficient, it holds that $C_{\beta} \to H$ and $D_{\beta} = \min \{H, 1/(1 - \beta)\} = H$. Substituting these values into (7) recovers the rate of Local SGD; see Table 6.
|
| 191 |
+
|
| 192 |
+
Remark 4. When $H \to \infty$, i.e., the networked agents tend not to conduct global synchronization, it holds that $C_{\beta} \to 1/(1 - \beta)$ and $D_{\beta} = \frac{1}{1 - \beta}$. Substituting these values into (7) recovers the rate of Gossip SGD; see Table 4.
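The limiting behavior in Remarks 2-4 is easy to verify numerically from the definitions $C_\beta = \sum_{k=0}^{H-1}\beta^k$ and $D_\beta = \min\{H, 1/(1-\beta)\}$. The minimal check below (values chosen purely for illustration) shows $C_\beta \to H$ as $\beta \to 1$ and $C_\beta \to 1/(1-\beta)$ as $H \to \infty$.

```python
def C_beta(beta, H):
    # Geometric sum sum_{k=0}^{H-1} beta^k = (1 - beta^H) / (1 - beta) for beta < 1.
    return H if beta == 1.0 else (1.0 - beta ** H) / (1.0 - beta)

def D_beta(beta, H):
    return min(H, 1.0 / (1.0 - beta))

# beta -> 1 (slow gossip mixing): C_beta approaches H, recovering the Local SGD rate in (7).
print(C_beta(0.999, 16), D_beta(0.999, 16))    # ~15.88, 16
# H -> infinity (rare global averaging): C_beta approaches 1/(1-beta), recovering Gossip SGD.
print(C_beta(0.9, 10**6), D_beta(0.9, 10**6))  # ~10.0, 10.0
```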
|
| 193 |
+
|
| 194 |
+
# 3.3. Convergence analysis: non-convex scenario
|
| 195 |
+
|
| 196 |
+
We first introduce an assumption about data heterogeneity specifically for non-convex problems:
|
| 197 |
+
|
| 198 |
+
Assumption 5 (DATA HETEROGENEITY). There exists a constant $\hat{b} > 0$ such that $\frac{1}{n}\sum_{i=1}^{n}\|\nabla f_i(\pmb{x}) - \nabla f(\pmb{x})\|^2 \leq \hat{b}^2$ for any $\pmb{x} \in \mathbb{R}^d$. If the local data follow the same distribution, it holds that $\hat{b} = 0$.
|
| 199 |
+
|
| 200 |
+
Theorem 2. Under Assumptions 1-3 and 5, if $\gamma$ satisfies the condition (6) (replace $b^2$ with $\hat{b}^2$ and use $r_0 = 4\mathbb{E}f(\bar{\boldsymbol{x}}^{(0)})$ ), it holds for any $T > 0$ that
|
| 201 |
+
|
| 202 |
+
$$
\frac{1}{T+1}\sum_{k=0}^{T}\mathbb{E}\|\nabla f(\bar{\boldsymbol{x}}^{(k)})\|^{2} = O\left(\frac{\sigma}{\sqrt{nT}} + \frac{C_{\beta}^{\frac{1}{3}}\beta^{\frac{2}{3}}\left(\sigma^{\frac{2}{3}} + D_{\beta}^{\frac{1}{3}}b^{\frac{2}{3}}\right)}{T^{\frac{2}{3}}} + \frac{\beta D_{\beta}}{T}\right) \tag{8}
$$

where $\bar{\pmb{x}}^{(k)} = \frac{1}{n}\sum_{i = 1}^{n}\pmb{x}_i^{(k)}$. (Proof is in Appendix C.)

Table 5. Transient time comparison between non-iid Gossip SGD and Gossip-PGA over the specific grid $(1 - \beta = O(1/n))$ topology. We choose $H = \sqrt{n}$ as the period in Gossip-PGA.

<table><tr><td></td><td>GOSSIP SGD</td><td>GOSSIP-PGA</td></tr><tr><td>TRANSIENT ITER.</td><td>\(O(n^{7})\)</td><td>\(O(n^{5})\)</td></tr><tr><td>SINGLE COMM.</td><td>\(O(\theta d + \alpha)\)</td><td>\(O(\theta d + \sqrt{n}\alpha)\)</td></tr><tr><td>TRANSIENT TIME</td><td>\(O(n^{7}\theta d + n^{7}\alpha)\)</td><td>\(O(n^{5}\theta d + n^{5.5}\alpha)\)</td></tr></table>
|
| 217 |
+
|
| 218 |
+
# 3.4. Comparison with Gossip SGD
|
| 219 |
+
|
| 220 |
+
To better illustrate how periodic global averaging relieves the effects of network topology in Gossip SGD, we list the convergence rates of Gossip SGD and Gossip-PGA for smooth convex or non-convex problems in Table 4. The first line is the general rate expression for both algorithms. In the second line we let $D_{\beta} = \min \{H, 1/(1-\beta)\} = 1/(1-\beta)$ for Gossip-PGA, and in the third line we let $D_{\beta} = H$. From this table, we derive the transient stages of Gossip SGD and Gossip-PGA for each scenario (i.e., large/small network, iid/non-iid data distribution) in Table 2 (see the derivation details in Appendix D). As explained in the Main Results subsection of the introduction, Tables 2 and 4 show that: (i) Gossip-PGA always converges faster (i.e., has a shorter transient stage) than Gossip SGD for any $\beta$ and $H$; (ii) this superiority becomes more evident for large and sparse networks where $\beta \rightarrow 1$.
|
| 221 |
+
|
| 222 |
+
Remark 5. The convergence analysis of topology-changing Gossip SGD (Koloskova et al., 2020) covers Gossip-PGA as a special case. Letting $p = 1$ and $\tau = H$ in Theorem 2 of (Koloskova et al., 2020), one derives that Gossip-PGA has a transient stage on the order of $\Omega(n^3 H^4)$ for the non-convex non-iid scenario. Such a transient stage cannot quantify the superiority over Gossip SGD and Local SGD. In fact, it may even suggest that periodic global averaging harms Gossip SGD when $H > \frac{1}{1 - \beta}$, which is counter-intuitive. This is because the analysis of (Koloskova et al., 2020) targets general time-varying topologies and does not utilize the structures listed in Sec. 3.1.

Table 6. Convergence rate comparison between Local SGD (L-SGD) and Gossip-PGA (G-PGA) over smooth convex/non-convex problems. The rate for Local SGD is from (Koloskova et al., 2020; Yu et al., 2019; Li et al., 2019b).

<table><tr><td colspan="2">RATES</td></tr><tr><td>L-SGD</td><td>O\(\left( \frac{\sigma}{\sqrt{nT}} + \frac{H^{\frac{1}{3}}\sigma^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{H^{\frac{2}{3}}b^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{H}{T} \right)\)</td></tr><tr><td>G-PGA</td><td>O\(\left( \frac{\sigma}{\sqrt{nT}} + \frac{C_{\beta}^{\frac{1}{3}}\beta^{\frac{2}{3}}\sigma^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{C_{\beta}^{\frac{1}{3}}H^{\frac{1}{3}}\beta^{\frac{2}{3}}b^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{\beta H}{T} \right)\)</td></tr></table>
|
| 229 |
+
|
| 230 |
+
Transient stage in runtime. Table 2 compares the transient stages of Gossip-PGA and Gossip SGD in iterations, but what practitioners ultimately care about is runtime. Since Gossip SGD and Gossip-PGA have the same computational overhead per iteration, we focus on the communication time spent in the transient stage.
|
| 231 |
+
|
| 232 |
+
Consider a computing cluster of size $n$. Let $\alpha$ denote the point-to-point latency in the network, and $\theta$ the communication time needed to transmit a scalar. Since the variable $x$ in problem (1) has dimension $d$, it takes $\theta d$ time to transmit $x$ between two nodes. Under this setting, the All-Reduce global averaging step takes $2\theta d + n\alpha = O(\theta d + n\alpha)$ time (see Section 2.5 in (Ben-Nun & Hoefler, 2019)). The gossip-style communication time varies with the network topology. For the commonly used ring or grid topology, one gossip communication takes $|\mathcal{N}_i| \theta d + \alpha = O(\theta d + \alpha)$, where $|\mathcal{N}_i|$ is the neighborhood size of node $i$ ($|\mathcal{N}_i| = 3$ for the ring and 5 for the grid). As for Gossip-PGA, if we amortize the periodic All-Reduce cost into each communication, the per-iteration cost is $|\mathcal{N}_i| \theta d + \alpha + (2\theta d + n\alpha)/H = O(\theta d + \sqrt{n}\alpha)$ when we set $H = \sqrt{n}$. Using the formula total time = transient stage (in iterations) $\times$ communication time per iteration, we calculate and compare the transient time of non-iid Gossip-PGA and Gossip SGD (over the grid topology) in Table 5. Other comparisons, for the iid scenario and the ring topology, can be found in Appendix D. In all tables, Gossip-PGA has the shorter transient time.
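To make the amortized communication cost concrete, the hedged sketch below encodes the timing model of this paragraph (ring All-Reduce cost $2\theta d + n\alpha$, gossip cost $|\mathcal{N}_i|\theta d + \alpha$); the numeric values of $\theta$, $\alpha$, $d$, and the neighborhood size are placeholders, not measurements.

```python
import math

def allreduce_time(d, n, theta, alpha):
    # Ring All-Reduce: roughly 2*theta*d transmission plus n*alpha latency.
    return 2 * theta * d + n * alpha

def gossip_time(d, num_neighbors, theta, alpha):
    # One gossip exchange with |N_i| neighbors (3 for the ring, 5 for the grid).
    return num_neighbors * theta * d + alpha

def gossip_pga_time_per_iter(d, n, H, num_neighbors, theta, alpha):
    # Gossip every iteration, plus one All-Reduce amortized over the H-iteration period.
    return gossip_time(d, num_neighbors, theta, alpha) + allreduce_time(d, n, theta, alpha) / H

# Placeholder parameters for illustration only.
d, n, theta, alpha = 25_500_000, 256, 1e-9, 1e-4
H = int(math.sqrt(n))
print(gossip_time(d, 5, theta, alpha))                     # Gossip SGD, grid topology
print(gossip_pga_time_per_iter(d, n, H, 5, theta, alpha))  # Gossip-PGA with H = sqrt(n)
```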
|
| 233 |
+
|
| 234 |
+
# 3.5. Comparison with Local SGD
|
| 235 |
+
|
| 236 |
+
The convergence rates of Gossip-PGA and Local SGD are listed in Table 6, from which we derive their transient stages in Table 3 (details are in Appendix D). As explained in the introduction, Tables 3 and 6 show that (i) Gossip-PGA always converges faster (i.e., has shorter transient stages) than Local SGD for any $\beta$ and $H$, and (ii) this superiority becomes more evident for well-connected networks where $\beta \rightarrow 0$.
|
| 237 |
+
|
| 238 |
+
As to the wall-clock transient time of Local SGD, if we amortize the periodic All-Reduce cost into each local update, it takes $(2\theta d + n\alpha)/H = O(\theta d / H + n\alpha / H)$ communication time per iteration. Using the transient iterations derived in Table 3, the total transient time for Local SGD (non-iid scenario) is $O(n^{3}H^{3}(\theta d + n\alpha))$. Comparing it with the total transient time $O(n^{3}H C_{\beta}^{2}\beta^{4}(H\theta d + n\alpha))$ for Gossip-PGA, we find that Gossip-PGA has a shorter transient runtime for any large $H > \beta^4 C_\beta^2$.
|
| 241 |
+
|
| 242 |
+
Remark 6. While we have argued that the transient time of Gossip-PGA is shorter than that of Gossip SGD and Local SGD, it is worth noting that the communication time during the linear speedup stage (i.e., after the transient stage) also contributes to the total training time. In this stage, Gossip-PGA is less efficient due to its periodic global averaging. However, the extensive deep learning experiments in Sec. 5 show that Gossip-PGA still achieves shorter total training time than Gossip SGD and Local SGD.
|
| 243 |
+
|
| 244 |
+
# 4. Gossip SGD with Adaptive Global Averaging
|
| 245 |
+
|
| 246 |
+
Gossip-PGA suffers from the burden of tuning $H$ by hand. A small $H$ will incur more communication overhead while a large value can slow down the convergence. We further propose Gossip-AGA, an adaptive extension of Gossip-PGA.
|
| 247 |
+
|
| 248 |
+
Intuition. A small consensus variance $\sum_{i=1}^{n} \mathbb{E}\|\pmb{x}_i - \bar{\pmb{x}}\|^2$ accelerates Gossip-PGA. To see this, if $\sum_{i=1}^{n} \mathbb{E}\|\pmb{x}_i - \bar{\pmb{x}}\|^2 = 0$ at every iteration, then Gossip-PGA reduces to parallel SGD and reaches its fastest convergence. Recall from Lemma 8 in the appendix that the averaged consensus $\frac{1}{T+1} \sum_{k=0}^{T} \mathbb{E}\|\mathbf{x}^{(k)} - \bar{\mathbf{x}}^{(k)}\|^2$ is bounded by $\frac{d_1 \gamma^2}{T+1} \sum_{k=0}^{T} \mathbb{E}\|\nabla f(\bar{\pmb{x}}^{(k)})\|^2 + d_2 \gamma^2$, where $d_1$ and $d_2$ are constants. The initial consensus variance (when $T$ is small) can therefore be significant due to the large $\gamma$ and $\mathbb{E}\|\nabla f(\bar{\pmb{x}}^{(k)})\|^2$. In the later stage, when $T$ is sufficiently large, both the diminishing step-size $\gamma$ and the gradient $\mathbb{E}\|\nabla f(\bar{\pmb{x}}^{(k)})\|^2$ go to 0, naturally leading to a small consensus variance. With these observations, it is intuitive to perform global synchronization more frequently in the initial stage to reduce the overall consensus variance.
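The consensus variance driving this intuition can be monitored directly during training; a minimal sketch (the stacked worker parameters are a hypothetical input) is:

```python
import numpy as np

def consensus_variance(worker_params):
    # worker_params: (n, d) array whose i-th row is x_i; returns sum_i ||x_i - x_bar||^2.
    x_bar = worker_params.mean(axis=0)
    return np.sum((worker_params - x_bar) ** 2)
```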
|
| 249 |
+
|
| 250 |
+
Convergence. We denote $H^{(\ell)}$ as the duration of the $\ell$ -th period. The following corollary establishes convergence for Gossip-PGA with any time-varying but finite global averaging period sequence $\{H^{(\ell)}\}$ :
|
| 251 |
+
|
| 252 |
+
Corollary 1. Suppose Assumptions 1-3 and 5 hold and the time-varying period $H^{(\ell)}$ is upper bounded by $H_{\max} = \max_{\ell \geq 0}\{H^{(\ell)}\}$ . If $\gamma$ satisfies the condition in Theorem 1 with $\bar{H} = H_{\max}$ , then Gossip-AGA converges at rate (8) in which $H$ is replaced by $H_{\max}$ . (Proof is in Appendix E.)
|
| 253 |
+
|
| 254 |
+
Adaptive Strategy. This subsection will propose an adaptive strategy that is inspired by (Wang & Joshi, 2019). If we recover the influence of the initial value $F_0 = \mathbb{E}f(\bar{\boldsymbol{x}}^{(0)})$ on convergence rate (8), Gossip-PGA for non-convex problems
|
| 255 |
+
|
| 256 |
+
will converge at
|
| 257 |
+
|
| 258 |
+
$$
|
| 259 |
+
O\Big(\frac{\sigma F_{0}^{\frac{1}{2}}}{\sqrt{nT}} + \frac{H^{\frac{1}{3}}\beta^{\frac{2}{3}}\sigma^{\frac{2}{3}}F_{0}^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{H^{\frac{2}{3}}\beta^{\frac{2}{3}}\hat{b}^{\frac{2}{3}}F_{0}^{\frac{2}{3}}}{T^{\frac{2}{3}}} + \frac{\beta D_{\beta}F_{0}}{T}\Big).
|
| 260 |
+
$$
|
| 261 |
+
|
| 262 |
+
For a fixed $T$ , a period $H = \sigma^{\frac{3}{2}} T^{\frac{1}{4}} / (\beta \hat{b} F_{0}^{\frac{1}{4}} n^{\frac{3}{4}})$ will guarantee the linear speedup. Therefore, the initial period $H^{(0)}$ can be chosen as $H^{(0)} = d_{1} / [\mathbb{E}f(\bar{\boldsymbol{x}}^{(0)})]^{\frac{1}{4}}$ for some constant $d_{1}$ . Similarly, for the $\ell$ -th period, workers can be viewed as restarting training at a new initial point $\bar{\boldsymbol{x}}^{(T_{\ell-1})}$ where $T_{\ell-1} = H^{(0)} + \dots + H^{(\ell-1)}$ . As a result, the $\ell$ -th period $H^{(\ell)}$ can be chosen as $H^{(\ell)} = d_{1} / [\mathbb{E}f(\bar{\boldsymbol{x}}^{(T_{\ell-1})})]^{\frac{1}{4}}$ . With such choice of $H^{(0)}$ and $H^{(\ell)}$ , it is not difficult to have
|
| 263 |
+
|
| 264 |
+
$$
|
| 265 |
+
H^{(\ell)} = \left(\frac{\mathbb{E} f(\bar{\boldsymbol{x}}^{(0)})}{\mathbb{E} f(\bar{\boldsymbol{x}}^{(T_{\ell-1})})}\right)^{\frac{1}{4}} H^{(0)}. \tag{9}
|
| 266 |
+
$$
|
| 267 |
+
|
| 268 |
+
Since $\mathbb{E}f(\bar{\pmb{x}}^{(k)})$ decreases as $k$ increases, (9) generates an increasing sequence of periods $H^{(\ell)}$. We list Gossip-AGA as Algorithm 2 in Appendix G and elaborate on implementation details there.
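A minimal sketch of the update rule (9) is given below; the loss estimates fed to it would come from the periodically synchronized model, and the rounding and the cap $H_{\max}$ are implementation assumptions (Corollary 1 only requires the periods to stay bounded).

```python
def next_period(H0, loss0, loss_curr, H_max=64):
    # Eq. (9): H^(l) = (E f(x^(0)) / E f(x^(T_{l-1})))^(1/4) * H^(0).
    H = H0 * (loss0 / loss_curr) ** 0.25
    return min(H_max, max(1, int(round(H))))

# Illustrative values only: as the loss decreases, the period between global averages grows.
print(next_period(4, loss0=6.0, loss_curr=6.0))  # 4
print(next_period(4, loss0=6.0, loss_curr=2.0))  # 5
print(next_period(4, loss0=6.0, loss_curr=0.5))  # 7
```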
|
| 269 |
+
|
| 270 |
+
# 5. Experimental Results
|
| 271 |
+
|
| 272 |
+
In this section, we first examine how the transient stage differs among Gossip-PGA, Gossip SGD, and Local SGD on networks of different topologies and sizes, using convex logistic regression. Next, we systematically evaluate the aforementioned methods on two typical large-scale deep learning tasks: image classification (over 256 GPUs) and language modeling (over 64 GPUs). See Appendix F for implementation details.
|
| 273 |
+
|
| 274 |
+
# 5.1. Logistic Regression
|
| 275 |
+
|
| 276 |
+
We consider a distributed logistic regression problem with $f_{i}(x) = \frac{1}{M}\sum_{m=1}^{M}\ln[1 + \exp(-y_{i,m}h_{i,m}^{T}x)]$, where $\{h_{i,m}, y_{i,m}\}_{m=1}^{M}$ are the local data samples at agent $i$, with $h_{i,m} \in \mathbb{R}^d$ the feature vector and $y_{i,m} \in \{+1, -1\}$ the corresponding label. Each $h_{i,m}$ is generated from the normal distribution $\mathcal{N}(0, 10I_d)$. To generate $y_{i,m}$, we first generate an auxiliary random vector $x_i^\star \in \mathbb{R}^d$ with each entry following $\mathcal{N}(0,1)$. Next, we draw a value uniformly from $\mathcal{U}(0,1)$; if this value is at most $1 / [1 + \exp(-h_{i,m}^T x_i^\star)]$, then $y_{i,m}$ is set to $+1$; otherwise $y_{i,m}$ is set to $-1$. We let $x_i^\star = x^\star\ \forall i$ to generate data for the iid scenario and $x_i^\star \neq x_j^\star\ \forall i \neq j$ for the non-iid scenario. Each $x_i^\star$ is normalized.
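For concreteness, this data-generation procedure can be reproduced with the short NumPy sketch below (the seed and helper names are our own, and we read the covariance $10I_d$ as variance 10 per coordinate).

```python
import numpy as np

def generate_agent_data(M, d, x_star, rng):
    # Features h_{i,m} ~ N(0, 10 I_d).
    feats = rng.normal(0.0, np.sqrt(10.0), size=(M, d))
    # Label +1 with probability 1 / (1 + exp(-h^T x_star)), otherwise -1.
    prob = 1.0 / (1.0 + np.exp(-feats @ x_star))
    labels = np.where(rng.uniform(size=M) <= prob, 1.0, -1.0)
    return feats, labels

rng = np.random.default_rng(0)
d, M, n = 10, 8000, 20
x_star = rng.normal(size=d)
x_star /= np.linalg.norm(x_star)  # normalized; shared across all agents in the iid case
local_data = [generate_agent_data(M, d, x_star, rng) for _ in range(n)]
```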
|
| 277 |
+
|
| 278 |
+
Figure 1 compares how Gossip-PGA performs against parallel SGD and Gossip SGD over the ring topology with a non-iid data distribution. The network sizes are set as $n = 20, 50, 100$, which results in $\beta = 0.967, 0.995, 0.998$. We set $d = 10$ and $M = 8000$. $H$ is set to 16 in Gossip-PGA. The step-size $\gamma$ is initialized to 0.2 and halved every 1000 iterations. We repeat all simulations 50 times and plot the mean of all trials as a solid curve and the standard deviation as a shaded area. It is observed that both Gossip SGD and Gossip-PGA asymptotically converge at the same rate as parallel SGD (i.e., the linear speedup stage), albeit with different transient stages. Gossip-PGA always has shorter transient stages than Gossip SGD, and this superiority becomes more evident as the network size increases (recall that $1 - \beta = O(1/n^2)$ for the ring). For experiments on different topologies such as the grid and exponential graph, on iid data distributions, and for a comparison with Local SGD, see Appendix F. All experiments are consistent with the theoretical transient-stage comparisons in Tables 2 and 3.

Table 7. Comparison of Top-1 validation accuracy (Column 2) and wall-clock training time (Column 3) on different methods after finishing all epochs. We also report the epochs and training time required to reach $76\%$ accuracy (Column 4). "N.A." implies that the target accuracy is not reached when all epochs are completed.

<table><tr><td>METHOD</td><td>ACC.%</td><td>HRS</td><td>EPOCHS/HRS TO 76%</td></tr><tr><td>PARALLEL SGD</td><td>76.26</td><td>2.22</td><td>94 / 1.74</td></tr><tr><td>LOCAL SGD</td><td>74.20</td><td>1.05</td><td>N.A.</td></tr><tr><td>LOCAL SGD ×3</td><td>75.41</td><td>3</td><td>N.A.</td></tr><tr><td>GOSSIP SGD</td><td>75.34</td><td>1.55</td><td>N.A.</td></tr><tr><td>GOSSIP SGD ×2</td><td>76.18</td><td>3</td><td>198 / 2.55</td></tr><tr><td>OSGP</td><td>75.04</td><td>1.32</td><td>N.A.</td></tr><tr><td>OSGP ×2</td><td>76.07</td><td>2.59</td><td>212 / 2.28</td></tr><tr><td>GOSSIP-PGA</td><td>76.28</td><td>1.66</td><td>109 / 1.50</td></tr><tr><td>GOSSIP-AGA</td><td>76.25</td><td>1.57</td><td>91 / 1.20</td></tr></table>
|
| 285 |
+
|
| 286 |
+
# 5.2. Image Classification
|
| 287 |
+
|
| 288 |
+
The ImageNet-1k (Deng et al., 2009) ${}^{1}$ dataset consists of 1,281,167 training images and 50,000 validation images in 1000 classes. We train a ResNet-50 (He et al., 2016) model (~25.5M parameters) following the training protocol of (Goyal et al., 2017). We train for 120 epochs in total. The learning rate is warmed up during the first 5 epochs and decayed by a factor of 10 at epochs 30, 60, and 90. We set the period to 6 for both Local SGD and Gossip-PGA. In Gossip-AGA, the period is initially set to 4 and adapted afterwards; roughly 9% of the iterations conduct global averaging.
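The learning-rate schedule described above (5 warmup epochs, decay by 10 at epochs 30/60/90) can be expressed as a small helper; the base learning rate below is a placeholder, since the exact value follows the linear scaling rule of (Goyal et al., 2017) and depends on the global batch size.

```python
def lr_at_epoch(epoch, base_lr=0.1, warmup_epochs=5, milestones=(30, 60, 90)):
    # Linear warmup over the first epochs, then step decay by 10x at each milestone.
    if epoch < warmup_epochs:
        return base_lr * (epoch + 1) / warmup_epochs
    return base_lr * (0.1 ** sum(epoch >= m for m in milestones))

print([round(lr_at_epoch(e), 4) for e in (0, 4, 29, 30, 60, 90, 119)])
# [0.02, 0.1, 0.1, 0.01, 0.001, 0.0001, 0.0001]
```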
|
| 289 |
+
|
| 290 |
+
Table 7 shows the top-1 validation accuracy and wall-clock training time of the aforementioned methods. It is observed that both Gossip-PGA and Gossip-AGA reach accuracy comparable to parallel SGD after all 120 epochs, but with roughly $1.3\mathrm{x}\sim 1.4\mathrm{x}$ training-time speed-up. On the other hand, while Local SGD and Gossip SGD complete all 120 epochs faster than Gossip-PGA/AGA and parallel SGD, they suffer from $2.06\%$ and $0.92\%$ accuracy degradations, respectively. Moreover, neither algorithm reaches the $76\%$ top-1 accuracy within 120 epochs. We also compare with OSGP (Assran et al., 2019), which adds communication overlapping to Gossip
|
| 291 |
+
|
| 292 |
+

|
| 293 |
+
Figure 1. Convergence comparison between Gossip-PGA, Gossip and parallel SGDs on the logistic regression problem over ring topology. The transient stage is determined by counting iterations before an algorithm exactly matches the convergence curve of Parallel SGD. Note that the transient stage for Gossip SGD in the middle and right sub-figures is beyond the plotting canvas.
|
| 294 |
+
|
| 295 |
+

|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
|
| 299 |
+

|
| 300 |
+
Figure 2. Convergence results on the ImageNet in terms of iteration and runtime. More results are in Appendix F.3.
|
| 301 |
+
|
| 302 |
+

|
| 303 |
+
|
| 304 |
+
SGD. We find that $\mathrm{OSGP} \times 2$, while faster than Gossip $\mathrm{SGD} \times 2$, still needs more time than Gossip-PGA to achieve $76\%$ accuracy. To further illustrate how much time it takes Local SGD and Gossip SGD to reach the target accuracy, we run additional Local SGD and Gossip SGD experiments with extended epochs (i.e., Gossip $\mathrm{SGD} \times 2$ trains for 240 epochs in total with the learning rate decayed at epochs 60, 120, and 180; Local $\mathrm{SGD} \times 3$ trains for 360 epochs in total with the learning rate decayed at epochs 90, 180, and 270). It is observed that Gossip $\mathrm{SGD} \times 2$ reaches the target with notably more time expense than Gossip-PGA/AGA and parallel SGD, and Local $\mathrm{SGD} \times 3$ still cannot reach $76\%$ accuracy. All these observations validate that periodic global averaging can accelerate Gossip SGD significantly.
|
| 305 |
+
|
| 306 |
+
Figure 2 shows the iteration-wise and runtime-wise convergence in terms of training loss. In the left figure, Gossip-PGA/AGA converges faster (in iterations) and to a lower loss than Local SGD and Gossip SGD, which is consistent with our theory. In the right figure, Gossip-PGA/AGA is the fastest method (in wall-clock time) to reach the same training loss as parallel SGD.
|
| 307 |
+
|
| 308 |
+
Comparison with SlowMo. Gossip-PGA is an instance of SlowMo in which the base optimizer is Gossip SGD, the slow momentum is $\beta = 0$, and the slow learning rate is $\alpha = 1$. We conducted experiments comparing Gossip-PGA with SlowMo (Table 8). It is observed that the additional slow momentum update helps SlowMo with a large $H$ but degrades it when $H$ is small. This observation is consistent with Fig. 3(a) in (Wang et al., 2019) and implies that the slow momentum update may not always be beneficial in SlowMo.
|
| 311 |
+
|
| 312 |
+
<table><tr><td>Period</td><td>Gossip-PGA</td><td>SlowMo</td></tr><tr><td>H=6</td><td>76.28</td><td>75.23</td></tr><tr><td>H=48</td><td>75.66</td><td>75.81</td></tr></table>
|
| 313 |
+
|
| 314 |
+
Table 8. Comparison of Top-1 validation accuracy between Gossip-PGA and SlowMo under different periods.
|
| 315 |
+
|
| 316 |
+
Ring Topology. While the convergence properties of Gossip-PGA are established over a static network topology, we use the dynamic one-peer exponential topology in the above deep learning experiments because it usually achieves better accuracy. To illustrate the derived theoretical results, we conduct an additional experiment over the static ring topology to compare Gossip-PGA with Gossip SGD in Table 9. It is observed that Gossip-PGA achieves better accuracy than Gossip SGD after running the same number of epochs, which coincides with our analysis that Gossip-PGA converges faster.
|
| 317 |
+
|
| 318 |
+
Scalability. We establish in Theorem 2 that Gossip-PGA achieves linear speedup in the non-convex setting.
|
| 319 |
+
|
| 320 |
+

|
| 321 |
+
Figure 3. Convergence results of BERT on the language modeling task in terms of iteration and runtime.
|
| 322 |
+
|
| 323 |
+

|
| 324 |
+
|
| 325 |
+
<table><tr><td>Method</td><td>Epoch</td><td>Acc%</td><td>Time(Hrs.)</td></tr><tr><td>Gossip SGD</td><td>120</td><td>74.86</td><td>1.56</td></tr><tr><td>Gossip PGA</td><td>120</td><td>75.94</td><td>1.68</td></tr></table>
|
| 326 |
+
|
| 327 |
+
To validate this, we conduct a scaling experiment and list the results in Table 10. Entries report the final accuracy and hours to finish training. It is observed that Gossip-PGA achieves a roughly linear speedup in training time without notable performance degradation.
|
| 328 |
+
|
| 329 |
+
Table 9. Comparison of Top-1 validation accuracy on Gossip-PGA and Gossip SGD with ring topology.
|
| 330 |
+
|
| 331 |
+
<table><tr><td>Method</td><td>4 nodes</td><td>8 nodes</td><td>16 nodes</td><td>32 nodes</td></tr><tr><td>Parallel SGD</td><td>76.3/11.6</td><td>76.4/6.3</td><td>76.3/3.7</td><td>76.2/2.2</td></tr><tr><td>Gossip SGD</td><td>76.3/11.1</td><td>76.4/5.7</td><td>75.9/2.8</td><td>75.0/1.5</td></tr><tr><td>Gossip PGA</td><td>76.4/11.2</td><td>76.7/5.9</td><td>76.3/3.0</td><td>76.2/1.6</td></tr></table>
|
| 332 |
+
|
| 333 |
+
# 5.3. Language Modeling
|
| 334 |
+
|
| 335 |
+
BERT (Devlin et al., 2018) is a widely used pre-training language representation model for NLP tasks. We train a BERT-Large model ( $\sim$ 330M parameters) on the Wikipedia
|
| 336 |
+
|
| 337 |
+
Table 10. Scaling effects on different methods with different numbers of nodes. Figures represent the final accuracy and hours to complete training.
|
| 338 |
+
|
| 339 |
+
<table><tr><td>METHOD</td><td>FINAL LOSS</td><td>RUNTIME (HRS)</td></tr><tr><td>PARALLEL SGD</td><td>1.75</td><td>59.02</td></tr><tr><td>LOCAL SGD</td><td>2.85</td><td>20.93</td></tr><tr><td>LOCAL SGD ×3</td><td>1.88</td><td>60</td></tr><tr><td>GOSSIP SGD</td><td>2.17</td><td>29.7</td></tr><tr><td>GOSSIP SGD ×2</td><td>1.81</td><td>59.7</td></tr><tr><td>GOSSIP-PGA</td><td>1.82</td><td>35.4</td></tr><tr><td>GOSSIP-AGA</td><td>1.77</td><td>30.4</td></tr></table>
|
| 340 |
+
|
| 341 |
+
Table 11. Comparison of training loss and training time of BERT training on different algorithms after completing all training steps.
|
| 342 |
+
|
| 343 |
+
and BookCorpus datasets. We set the period to 6 for both Local SGD and Gossip-PGA. In Gossip-AGA, the period is initially set to 4 and adapted afterwards; roughly $9.6\%$ of the iterations conduct global averaging.
|
| 344 |
+
|
| 345 |
+
Table 11 shows the final training loss and training runtime of the aforementioned methods. Gossip-AGA reaches a training loss comparable to parallel SGD, but with roughly a $1.94\times$ training-time speed-up. Gossip SGD and Local SGD cannot reach a training loss below 1.8 even when trained for over 60 hours (see Local SGD $\times 3$ and Gossip SGD $\times 2$). Figure 3 shows the iteration-wise and runtime-wise convergence of the training loss for the aforementioned methods. The left plot shows that Gossip-PGA/AGA has almost the same convergence as Gossip SGD in iterations; the right plot shows that Gossip-AGA is the fastest method in training time to reach the same training loss as parallel SGD.
|
| 346 |
+
|
| 347 |
+
# 6. Conclusion
|
| 348 |
+
|
| 349 |
+
We introduce Gossip-PGA/AGA to mitigate the slow convergence of Gossip SGD in distributed training. Theoretically, we prove the convergence improvement for smooth convex and non-convex problems. Empirically, experimental results on large-scale training tasks validate our theory.
|
| 350 |
+
|
| 351 |
+
# References
|
| 352 |
+
|
| 353 |
+
Alistarh, D., Grubic, D., Li, J., Tomioka, R., and Vojnovic, M. Qsgd: Communication-efficient sgd via gradient quantization and encoding. In Advances in Neural Information Processing Systems, pp. 1709-1720, 2017.
|
| 354 |
+
|
| 355 |
+
Assran, M., Loizou, N., Ballas, N., and Rabbat, M. Stochastic gradient push for distributed deep learning. In International Conference on Machine Learning (ICML), pp. 344-353, 2019.
|
| 356 |
+
|
| 357 |
+
Bayoumi, A. K. R., Mishchenko, K., and Richtarik, P. Tighter theory for local sgd on identical and heterogeneous data. In International Conference on Artificial Intelligence and Statistics, pp. 4519-4529, 2020.
|
| 360 |
+
Ben-Nun, T. and Hoefler, T. Demystifying parallel and distributed deep learning: An in-depth concurrency analysis. ACM Computing Surveys (CSUR), 52(4):1-43, 2019.
|
| 361 |
+
Berahas, A. S., Bollapragada, R., Keskar, N. S., and Wei, E. Balancing communication and computation in distributed optimization. IEEE Transactions on Automatic Control, 64(8):3141-3155, 2018.
|
| 362 |
+
Bernstein, J., Zhao, J., Azizzadenesheli, K., and Anandkumar, A. signsgd with majority vote is communication efficient and fault tolerant. arXiv preprint arXiv:1810.05291, 2018.
|
| 363 |
+
Chen, J. and Sayed, A. H. Diffusion adaptation strategies for distributed optimization and learning over networks. IEEE Transactions on Signal Processing, 60(8):4289-4305, 2012.
|
| 364 |
+
Chen, T., Giannakis, G., Sun, T., and Yin, W. LAG: Lazily aggregated gradient for communication-efficient distributed learning. In Advances in Neural Information Processing Systems, pp. 5050-5060, 2018.
|
| 365 |
+
Deng, J., Dong, W., Socher, R., Li, L.-J., Li, K., and Fei-Fei, L. Imagenet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 248-255. IEEE, 2009.
|
| 366 |
+
Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
|
| 367 |
+
Di Lorenzo, P. and Scutari, G. Next: In-network nonconvex optimization. IEEE Transactions on Signal and Information Processing over Networks, 2(2):120-136, 2016.
|
| 368 |
+
Duchi, J. C., Agarwal, A., and Wainwright, M. J. Dual averaging for distributed optimization: Convergence analysis and network scaling. IEEE Transactions on Automatic control, 57(3):592-606, 2011.
|
| 369 |
+
Goyal, P., Dollar, P., Girshick, R., Noordhuis, P., Wesolowski, L., Kyrola, A., Tulloch, A., Jia, Y., and He, K. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.
|
| 370 |
+
He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016.
|
| 371 |
+
He, L., Bian, A., and Jaggi, M. Cola: Decentralized linear learning. In Advances in Neural Information Processing Systems, pp. 4536-4546, 2018.
|
| 372 |
+
|
| 373 |
+
Koloskova, A., Lin, T., Stich, S. U., and Jaggi, M. Decentralized deep learning with arbitrary communication compression. In International Conference on Learning Representations, 2019a.
|
| 374 |
+
Koloskova, A., Stich, S., and Jaggi, M. Decentralized stochastic optimization and gossip algorithms with compressed communication. In International Conference on Machine Learning, pp. 3478-3487, 2019b.
|
| 375 |
+
Koloskova, A., Loizou, N., Boreiri, S., Jaggi, M., and Stich, S. U. A unified theory of decentralized sgd with changing topology and local updates. In International Conference on Machine Learning (ICML), pp. 1-12, 2020.
|
| 376 |
+
Li, M., Andersen, D. G., Park, J. W., Smola, A. J., Ahmed, A., Josifovski, V., Long, J., Shekita, E. J., and Su, B.-Y. Scaling distributed machine learning with the parameter server. In 11th USENIX Symposium on Operating Systems Design and Implementation (OSDI 14), pp. 583-598, 2014.
|
| 377 |
+
Li, X., Huang, K., Yang, W., Wang, S., and Zhang, Z. On the convergence of fedavg on non-iid data. In International Conference on Learning Representations, 2019a.
|
| 378 |
+
Li, X., Yang, W., Wang, S., and Zhang, Z. Communication efficient decentralized training with multiple local updates. arXiv preprint arXiv:1910.09126, 2019b.
|
| 379 |
+
Li, Z., Shi, W., and Yan, M. A decentralized proximal-gradient method with network independent step-sizes and separated convergence rates. IEEE Transactions on Signal Processing, July 2019c. Early access. Also available on arXiv:1704.07807.
|
| 380 |
+
Lian, X., Zhang, C., Zhang, H., Hsieh, C.-J., Zhang, W., and Liu, J. Can decentralized algorithms outperform centralized algorithms? a case study for decentralized parallel stochastic gradient descent. In Advances in Neural Information Processing Systems, pp. 5330-5340, 2017.
|
| 381 |
+
Lian, X., Zhang, W., Zhang, C., and Liu, J. Asynchronous decentralized parallel stochastic gradient descent. In International Conference on Machine Learning, pp. 3043-3052, 2018.
|
| 382 |
+
Lin, T., Stich, S. U., Patel, K. K., and Jaggi, M. Don't use large mini-batches, use local sgd. arXiv preprint arXiv:1808.07217, 2018.
|
| 383 |
+
Liu, Y., Xu, W., Wu, G., Tian, Z., and Ling, Q. Communication-censored admm for decentralized consensus optimization. IEEE Transactions on Signal Processing, 67(10):2565-2579, 2019.
|
| 384 |
+
|
| 385 |
+
Lu, S., Zhang, X., Sun, H., and Hong, M. Gnsd: A gradient-tracking based nonconvex stochastic algorithm for decentralized optimization. In 2019 IEEE Data Science Workshop (DSW), pp. 315-321. IEEE, 2019.
|
| 386 |
+
Luo, Q., He, J., Zhuo, Y., and Qian, X. Prague: High-performance heterogeneity-aware asynchronous decentralized training. In Proceedings of the Twenty-Fifth International Conference on Architectural Support for Programming Languages and Operating Systems, pp. 401-416, 2020.
|
| 387 |
+
McMahan, B., Moore, E., Ramage, D., Hampson, S., and y Arcas, B. A. Communication-efficient learning of deep networks from decentralized data. In Artificial Intelligence and Statistics, pp. 1273-1282. PMLR, 2017.
|
| 388 |
+
Nedic, A. and Ozdaglar, A. Distributed subgradient methods for multi-agent optimization. IEEE Transactions on Automatic Control, 54(1):48-61, 2009.
|
| 389 |
+
Nedic, A., Olshevsky, A., and Shi, W. Achieving geometric convergence for distributed optimization over time-varying graphs. SIAM Journal on Optimization, 27(4): 2597-2633, 2017.
|
| 390 |
+
Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., et al. Pytorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems (NeurIPS), pp. 8024-8035, 2019.
|
| 391 |
+
Patarasuk, P. and Yuan, X. Bandwidth optimal all-reduce algorithms for clusters of workstations. Journal of Parallel and Distributed Computing, 69(2):117-124, 2009.
|
| 392 |
+
Qu, G. and Li, N. Harnessing smoothness to accelerate distributed optimization. IEEE Transactions on Control of Network Systems, 5(3):1245-1260, 2018.
|
| 393 |
+
Scaman, K., Bach, F., Bubeck, S., Lee, Y. T., and Massoulie, L. Optimal algorithms for smooth and strongly convex distributed optimization in networks. In International Conference on Machine Learning, pp. 3027-3036, 2017.
|
| 394 |
+
Scaman, K., Bach, F., Bubeck, S., Massoulie, L., and Lee, Y. T. Optimal algorithms for non-smooth distributed optimization in networks. In Advances in Neural Information Processing Systems, pp. 2740-2749, 2018.
|
| 395 |
+
Shi, W., Ling, Q., Yuan, K., Wu, G., and Yin, W. On the linear convergence of the admm in decentralized consensus optimization. IEEE Transactions on Signal Processing, 62(7):1750-1761, 2014.
|
| 396 |
+
Shi, W., Ling, Q., Wu, G., and Yin, W. EXTRA: An exact first-order algorithm for decentralized consensus optimization. SIAM Journal on Optimization, 25(2):944-966, 2015.
|
| 397 |
+
|
| 398 |
+
Stich, S. U. Local sgd converges fast and communicates little. In International Conference on Learning Representations (ICLR), 2019.
|
| 399 |
+
Tang, H., Lian, X., Yan, M., Zhang, C., and Liu, J. $d^2$ : Decentralized training over decentralized data. In International Conference on Machine Learning, pp. 4848-4856, 2018.
|
| 400 |
+
Tang, H., Yu, C., Lian, X., Zhang, T., and Liu, J. Doublesqueeze: Parallel stochastic gradient descent with double-pass error-compensated compression. In International Conference on Machine Learning, pp. 6155-6165. PMLR, 2019.
|
| 401 |
+
Tsitsiklis, J., Bertsekas, D., and Athans, M. Distributed asynchronous deterministic and stochastic gradient optimization algorithms. IEEE transactions on automatic control, 31(9):803-812, 1986.
|
| 402 |
+
Uribe, C. A., Lee, S., Gasnikov, A., and Nedic, A. A dual approach for optimal algorithms in distributed optimization over networks. Optimization Methods and Software, pp. 1-40, 2020.
|
| 403 |
+
Wang, J. and Joshi, G. Adaptive communication strategies to achieve the best error-runtime trade-off in local-update sgd. In Systems and Machine Learning (SysML) Conference, 2019.
|
| 404 |
+
Wang, J., Tantia, V., Ballas, N., and Rabbat, M. SlowMo: Improving communication-efficient distributed sgd with slow momentum. arXiv preprint arXiv:1910.00643, 2019.
|
| 405 |
+
Xin, R., Khan, U. A., and Kar, S. An improved convergence analysis for decentralized online stochastic non-convex optimization. arXiv preprint arXiv:2008.04195, 2020.
|
| 406 |
+
Xu, J., Zhu, S., Soh, Y. C., and Xie, L. Augmented distributed gradient methods for multi-agent optimization under uncoordinated constant step sizes. In IEEE Conference on Decision and Control (CDC), pp. 2055-2060, Osaka, Japan, 2015.
|
| 407 |
+
You, Y., Li, J., Reddi, S., Hseu, J., Kumar, S., Bhojanapalli, S., Song, X., Demmel, J., Keutzer, K., and Hsieh, C.-J. Large batch optimization for deep learning: Training bert in 76 minutes. In International Conference on Learning Representations, 2019.
|
| 408 |
+
Yu, H., Yang, S., and Zhu, S. Parallel restarted sgd with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019.
|
| 409 |
+
|
| 410 |
+
Yuan, K., Ling, Q., and Yin, W. On the convergence of decentralized gradient descent. SIAM Journal on Optimization, 26(3):1835-1854, 2016.
|
| 411 |
+
Yuan, K., Ying, B., Zhao, X., and Sayed, A. H. Exact diffusion for distributed optimization and learning - Part I: Algorithm development. IEEE Transactions on Signal Processing, 67(3):708 - 723, 2019.
|
| 412 |
+
Yuan, K., Alghunaim, S. A., Ying, B., and Sayed, A. H. On the influence of bias-correction on distributed stochastic optimization. IEEE Transactions on Signal Processing, 2020.
|
| 413 |
+
Zhang, J., De Sa, C., Mitliagkas, I., and Ré, C. Parallel sgd: When does averaging help? arXiv preprint arXiv:1606.07365, 2016.
|
| 414 |
+
Zinkevich, M., Weimer, M., Li, L., and Smola, A. J. Parallelized stochastic gradient descent. In Advances in neural information processing systems, pp. 2595-2603, 2010.
|
acceleratinggossipsgdwithperiodicglobalaveraging/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8517fb028ecc6eb0197b0b14979abe888eb12687afb4f4c73cdede7f3125862e
|
| 3 |
+
size 529724
|
acceleratinggossipsgdwithperiodicglobalaveraging/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:788a269a3202b775f9812a241f2f9bfd776fc6edf352eb04e2a6e825733696b8
|
| 3 |
+
size 605661
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:32426b97849180ae937c4f94d3015ce0b71efe7235c35727ae6c7feea235ab0b
|
| 3 |
+
size 101329
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1920bcaa2467b372ae860d86f1c906919f496caaa9c3c090e7440ad6ee82f27c
|
| 3 |
+
size 127207
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/742e98f5-2a06-4f94-98c6-f2fdd57168cf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:01a1c425b060e937071aa69f32e737a183aff185ebb53c985ae7997b16b6e12c
|
| 3 |
+
size 5526601
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/full.md
ADDED
|
@@ -0,0 +1,449 @@
| 1 |
+
# Accelerating Safe Reinforcement Learning with Constraint-mismatched Baseline Policies
|
| 2 |
+
|
| 3 |
+
Tsung-Yen Yang<sup>1</sup> Justinian Rosca<sup>2</sup> Karthik Narasimhan<sup>1</sup> Peter J. Ramadge<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
We consider the problem of reinforcement learning when provided with (1) a baseline control policy and (2) a set of constraints that the learner must satisfy. The baseline policy can arise from demonstration data or a teacher agent and may provide useful cues for learning, but it might also be sub-optimal for the task at hand, and is not guaranteed to satisfy the specified constraints, which might encode safety, fairness or other application-specific requirements. In order to safely learn from baseline policies, we propose an iterative policy optimization algorithm that alternates between maximizing expected return on the task, minimizing distance to the baseline policy, and projecting the policy onto the constraint-satisfying set. We analyze our algorithm theoretically and provide a finite-time convergence guarantee. In our experiments on five different control tasks, our algorithm consistently outperforms several state-of-the-art baselines, achieving 10 times fewer constraint violations and $40\%$ higher reward on average.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Deep reinforcement learning (RL) has achieved impressive results in several domains such as games (Mnih et al., 2013; Silver et al., 2016) and robotic control (Levine et al., 2016; Rajeswaran et al., 2017). However, in these complex applications, learning policies from scratch often requires tremendous amounts of time and computational power. To alleviate this issue, one would like to leverage a baseline policy available from demonstrations, a teacher or a previous task. However, the baseline policy may be sub-optimal for the new application and may not be guaranteed to produce actions that satisfy desired constraints on safety, fairness,
|
| 12 |
+
|
| 13 |
+
or other costs. For instance, when you drive an unfamiliar vehicle, you do so cautiously to ensure safety, while adapting your driving technique to the vehicle characteristics to improve your 'driving reward'. In effect, you (as the agent) gradually adapt a baseline policy (i.e., prior driving skill) to avoid violating the constraints (e.g., safety) while improving your driving reward (e.g., travel time, fuel efficiency).
|
| 14 |
+
|
| 15 |
+
The problem of safely learning from baseline policies is challenging because directly leveraging the baseline policy, as in DAGGER (Ross et al., 2011) or GAIL (Ho & Ermon, 2016), may result in policies that violate the constraints, since the baseline is not guaranteed to satisfy them. To ensure constraint satisfaction, prior work either adds a hyper-parameter-weighted copy of the imitation learning (IL) objective (i.e., imitating the baseline policy) to the RL objective (Rajeswaran et al., 2017; Gao et al., 2018; Hester et al., 2018), or pre-trains a policy with the baseline policy (e.g., uses the baseline policy as the initial policy) and then fine-tunes it through RL (Mülling et al., 2013; Chernova & Thomaz, 2014). However, neither approach ensures constraint satisfaction in every learning episode, which is an important feature of safe RL. In addition, a policy initialized from a low-entropy baseline policy may never explore.
|
| 16 |
+
|
| 17 |
+
In this work, to learn from the baseline policy while satisfying constraints, we propose an iterative algorithm that performs policy updates in three stages. The first step updates the policy to maximize expected reward using trust region policy optimization (e.g., TRPO (Schulman et al., 2015)). This can, however, result in a new intermediate policy that is too far from the baseline policy and may not satisfy the constraints. The second step performs a projection in policy space to control the distance between the current policy and the baseline policy. In contrast to the approach that regularizes the standard RL objective with the distance w.r.t. the baseline policy and makes the regularization parameter fade over time, our approach allows the learning agent to update the distance when needed. In addition, this step allows the agent to explore without being overly restricted by the potentially constraint-violating baseline policy. This also enables the baseline policy to influence the learning even at later iterations without the computational burden of learning a cost function for the baseline policy (Kwon et al., 2020). The third step ensures constraint satisfaction at every
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1. (a) Update procedures for SPACE. Step 1 (green) improves the reward in the trust region. Step 2 (blue) projects the policy onto an adaptable region around the baseline policy $\pi_B$ . Step 3 (red) projects the policy onto the constraint set. (b) Illustrating when $\pi_B$ is outside the constraint set. (c) Illustrating when $\pi_B$ is inside the constraint set. The highest reward is achieved at the yellow star. $h_D^k$ (the distance between $\pi^k$ and $\pi_B$ ) is updated to $h_D^{k+1}$ to ensure constraint satisfaction and exploration of the agent.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
|
| 26 |
+
iteration by performing a projection onto the set of policies that satisfy the given constraints. We call our algorithm Safe Policy Adaptation with Constrained Exploration (SPACE).
|
| 27 |
+
|
| 28 |
+
This paper's contributions are two-fold. (1) We explicitly examine how the baseline policy affects the cost violations of the agent and hence provide a method to safely learn from the baseline policy. This is done by controlling the distance between the learned policy at iteration $k$ and the baseline policy to ensure both feasibility of the optimization problem and safe exploration by the learning agent (Fig. 1(b) and (c)). This approach, in contrast to non-adaptable constraint sets and learning a policy from scratch (Yang et al., 2020), leads to better sample efficiency and is hence more favorable in real applications. To our knowledge, prior work does not carry out such an analysis. We further provide a finite-time guarantee for the convergence of SPACE. (2) We empirically show that SPACE can robustly learn from sub-optimal baseline policies in a diverse set of tasks. These include two Mujoco tasks with safety constraints and two real-world traffic management tasks with fairness constraints. We further show that our algorithm can safely learn from a human-demonstration driving policy with safety constraints. In all cases, SPACE outperforms state-of-the-art safe RL algorithms, averaging $40\%$ more reward with 10 times fewer cost violations. This shows that SPACE safely and efficiently leverages the baseline policy, and represents a step towards safe deployment of RL in real applications<sup>1</sup>.
|
| 29 |
+
|
| 30 |
+
# 2. Related Work
|
| 31 |
+
|
| 32 |
+
Safe RL. Learning constraint-satisfying policies has been explored in the context of safe RL (Garcia & Fernandez, 2015; Hasanbeig et al., 2020; Junges et al., 2016; Jansen et al., 2020; Chow et al., 2018; Bharadhwaj et al., 2020; Srinivasan et al., 2020). Prior work either uses a conditional-gradient approach (Achiam et al., 2017), adds a weighted copy of the cost objective in the reward function (Tessler et al., 2019; Chow et al., 2019; Fujimoto et al., 2019; Stooke et al., 2020), adds a safety layer to the policy (Dalal et al.,
|
| 33 |
+
|
| 34 |
+
2018; Avni et al., 2019), or uses chance constraints (Fu & Prashanth L, 2018; Zheng & Ratliff, 2020). In contrast, we use projections to ensure safety.
|
| 35 |
+
|
| 36 |
+
In addition, Thananjeyan et al. (2021) use a sub-optimal (but still safe) baseline policy $\pi_B$ to guide learning. They obtain a safe policy by iteratively solving a model predictive control problem, whereas we focus on the model-free setting, which makes a direct comparison with their method difficult. While Zhang et al. (2020); Srinivasan et al. (2020); Thananjeyan et al. (2020) pre-train a safe policy, they do not focus on how to safely use baseline policies. Moreover, we do not have two separate stages of pre-training and fine-tuning.
|
| 37 |
+
|
| 38 |
+
Yang et al. (2020) also use projections to ensure safety in Projection-based Constrained Policy Optimization (PCPO). However, we show that treating learning from the baseline policy as another fixed constraint in PCPO results in cost constraint violations or sub-optimal reward performance. Instead, our main idea is an adaptable constraint set that adjusts the distance between the baseline and learned policies at each iteration, with the distance controlled by the learning progress of the agent, i.e., the reward improvement and the cost constraint violations. This approach ensures both exploration and cost-constraint satisfaction. Please refer to Section 5 for a detailed comparison with PCPO.
|
| 39 |
+
|
| 40 |
+
Policy optimization with an initial safe set. Wachi & Sui (2020); Sui et al. (2015); Turchetta et al. (2016) assume that an initial safe set is given; the agent explores the environment and verifies the safety function starting from this initial safe set. In contrast, we assume the agent is given a baseline policy. Both assumptions are reasonable, as they provide an initial understanding of the environment.
|
| 41 |
+
|
| 42 |
+
Leveraging baseline policies for RL. Prior work has used baseline policies to provide initial information to RL algorithms to reduce or avoid undesirable situations. This is done by either: initializing the policy with the baseline policy (Driessens & Džeroski, 2004; Smart & Kaelbling, 2000; Koppejan & Whiteson, 2011; Abbeel et al., 2010; Gao et al., 2018; Le et al., 2019; Vecerik et al., 2017; Jaques et al., 2019), or providing a teacher's advice to the agent (Garcia & Fernández, 2012; Quintía Vidal et al., 2013; Abel et al.,
|
| 43 |
+
|
| 44 |
+
2017; Zhang et al., 2019). However, such works often assume that the baseline policy is constraint-satisfying (Sun et al., 2018; Balakrishna et al., 2019). In contrast, SPACE safely leverages the baseline policy without requiring it to satisfy the specified constraints. Pathak et al. (2015); Bartocci et al. (2011) also modify the existing known models (policies) based on new conditions in the context of the formal methods. In contrast, we solve this problem using projections in the policy space.
|
| 45 |
+
|
| 46 |
+
Learning from logged demonstration data. To effectively learn from demonstration data produced by a baseline policy, Wu et al. (2019); Brown et al. (2019); Kwon et al. (2020) assess the demonstration data by either predicting its cost in the new task using generative adversarial networks (GANs) (Goodfellow et al., 2014), or directly learning the cost function of the demonstration data. In contrast, SPACE controls the distance between the learned and baseline policies to ensure learning improvement.
|
| 47 |
+
|
| 48 |
+
# 3. Problem Formulation
|
| 49 |
+
|
| 50 |
+
We frame our problem as a constrained Markov Decision Process (CMDP) (Altman, 1999), defined as a tuple $< S, \mathcal{A}, T, R, C >$ . Here $S$ is the set of states, $\mathcal{A}$ is the set of actions, and $T$ specifies the conditional probability $T(s'|s, a)$ that the next state is $s'$ given the current state $s$ and action $a$ . In addition, $R: S \times \mathcal{A} \to \mathbb{R}$ is a reward function, and $C: S \times \mathcal{A} \to \mathbb{R}$ is a constraint cost function. The reward function encodes the benefit of using action $a$ in state $s$ , while the cost function encodes the corresponding constraint violation penalty.
|
| 51 |
+
|
| 52 |
+
A policy is a map from states to probability distributions on $\mathcal{A}$ . It specifies that in state $s$ the selected action is drawn from the distribution $\pi(s)$ . The state then transits from $s$ to $s'$ according to the state transition distribution $T(s'|s, a)$ . In doing so, a reward $R(s, a)$ is received and a constraint cost $C(s, a)$ is incurred, as outlined above.
|
| 53 |
+
|
| 54 |
+
Let $\gamma \in (0,1)$ denote a discount factor, and $\tau$ denote the trajectory $\tau = (s_0,a_0,s_1,\dots)$ induced by a policy $\pi$ . Normally, we seek a policy $\pi$ that maximizes a cumulative discounted reward
|
| 55 |
+
|
| 56 |
+
$$
|
| 57 |
+
J _ {R} (\pi) \doteq \mathbb {E} _ {\tau \sim \pi} \left[ \sum_ {t = 0} ^ {\infty} \gamma^ {t} R \left(s _ {t}, a _ {t}\right) \right], \tag {1}
|
| 58 |
+
$$
|
| 59 |
+
|
| 60 |
+
while keeping the cumulative discounted cost below $h_C$
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
J _ {C} (\pi) \doteq \mathbb {E} _ {\tau \sim \pi} \left[ \sum_ {t = 0} ^ {\infty} \gamma^ {t} C \left(s _ {t}, a _ {t}\right) \right] \leq h _ {C}. \tag {2}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
Here we consider an additional objective. We are provided with a baseline policy $\pi_B$ and at each state $s$ we measure the divergence between $\pi(s)$ and $\pi_B(s)$ . For example, this could be the KL-divergence $D(s) \doteq D_{\mathrm{KL}}(\pi(s) \| \pi_B(s))$ .
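For the common case of diagonal-Gaussian policies in continuous control (an assumption on our part; the policy class is left unspecified here), the per-state divergence $D(s)$ has a closed form, sketched below.

```python
import numpy as np

def kl_diag_gaussians(mu_p, std_p, mu_q, std_q):
    # D_KL( N(mu_p, diag(std_p^2)) || N(mu_q, diag(std_q^2)) ), summed over action dimensions.
    var_p, var_q = std_p ** 2, std_q ** 2
    return float(np.sum(np.log(std_q / std_p) + (var_p + (mu_p - mu_q) ** 2) / (2.0 * var_q) - 0.5))

# D(s) with pi(s) = N([0.1, -0.2], 0.3^2 I) and pi_B(s) = N([0.0, 0.0], 0.5^2 I); toy numbers only.
print(kl_diag_gaussians(np.array([0.1, -0.2]), np.array([0.3, 0.3]),
                        np.array([0.0, 0.0]), np.array([0.5, 0.5])))
```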
|
| 67 |
+
|
| 68 |
+
We then seek a policy that maximizes Eq. (1), satisfies Eq. (2), and ensures the discounted divergence between the learned and baseline policies is below $h_D$ :
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
J _ {D} (\pi) \doteq \mathbb {E} _ {\tau \sim \pi} \left[ \sum_ {t = 0} ^ {\infty} \gamma^ {t} D (s _ {t}) \right] \leq h _ {D}. \tag {3}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
We do not assume that the baseline policy satisfies the cost constraint. Hence we allow $h_D$ to be adjusted during the learning of $\pi$ to allow for reward improvement and constraint satisfaction.
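In practice, $J_R$, $J_C$, and $J_D$ are estimated from sampled trajectories; a hedged sketch (the trajectory container format is our own convention) is:

```python
def discounted_sum(values, gamma):
    total, discount = 0.0, 1.0
    for v in values:
        total += discount * v
        discount *= gamma
    return total

def estimate_objectives(trajectories, gamma=0.99):
    # Each trajectory is a dict with per-step lists: 'rewards', 'costs', 'divergences'.
    n = len(trajectories)
    J_R = sum(discounted_sum(t["rewards"], gamma) for t in trajectories) / n
    J_C = sum(discounted_sum(t["costs"], gamma) for t in trajectories) / n
    J_D = sum(discounted_sum(t["divergences"], gamma) for t in trajectories) / n
    return J_R, J_C, J_D
```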
|
| 75 |
+
|
| 76 |
+
Let $\mu_t(\cdot|\pi)$ denote the state distribution at time $t$ under policy $\pi$ . The discounted state distribution induced by $\pi$ is defined to be $d^{\pi}(s) \doteq (1 - \gamma) \sum_{t=0}^{\infty} \gamma^t \mu_t(s|\pi)$ . Now bring in the reward advantage function (Kakade & Langford, 2002) defined by
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
A _ {R} ^ {\pi} (s, a) \dot {=} Q _ {R} ^ {\pi} (s, a) - V _ {R} ^ {\pi} (s),
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
where $V_{R}^{\pi}(s) \doteq \mathbb{E}_{\tau \sim \pi}[\sum_{t=0}^{\infty} \gamma^{t} R(s_{t}, a_{t}) | s_{0} = s]$ is the expected reward from state $s$ under policy $\pi$ , and $Q_{R}^{\pi}(s, a) \doteq \mathbb{E}_{\tau \sim \pi}[\sum_{t=0}^{\infty} \gamma^{t} R(s_{t}, a_{t}) | s_{0} = s, a_{0} = a]$ is the expected reward from state $s$ and initial action $a$ , and thereafter following policy $\pi$ . These definitions allow us to express the reward performance of one policy $\pi'$ in terms of another $\pi$ :
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
J _ {R} \left(\pi^ {\prime}\right) - J _ {R} (\pi) = \frac {1}{1 - \gamma} \mathbb {E} _ {s \sim d ^ {\pi^ {\prime}}, a \sim \pi^ {\prime}} \left[ A _ {R} ^ {\pi} (s, a) \right].
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
Similarly, we can define $A_D^\pi(s,a)$ , $Q_D^\pi(s,a)$ and $V_D^\pi(s)$ for the divergence cost, and $A_C^\pi(s,a)$ , $Q_C^\pi(s,a)$ and $V_C^\pi(s)$ for the constraint cost.
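For concreteness, the discounted objectives in Eqs. (1)–(3) can be estimated from sampled trajectories by simple Monte Carlo averaging. The sketch below is an illustration of the definitions, not the paper's exact implementation; the trajectory format (dictionaries with per-step `rewards`, `costs`, and `divergences`) is an assumption made for this example.

```python
import numpy as np

def discounted_sum(xs, gamma):
    """Return sum_t gamma^t * xs[t] for one trajectory."""
    return sum((gamma ** t) * x for t, x in enumerate(xs))

def estimate_objectives(trajectories, gamma=0.99):
    """Monte Carlo estimates of J_R, J_C, and J_D from sampled trajectories.

    Each trajectory is assumed to be a dict with per-step lists 'rewards',
    'costs', and 'divergences' (the D_KL(pi(s_t) || pi_B(s_t)) values).
    """
    J_R = np.mean([discounted_sum(tr["rewards"], gamma) for tr in trajectories])
    J_C = np.mean([discounted_sum(tr["costs"], gamma) for tr in trajectories])
    J_D = np.mean([discounted_sum(tr["divergences"], gamma) for tr in trajectories])
    return J_R, J_C, J_D
```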
|
| 89 |
+
|
| 90 |
+
# 4. Safe Policy Adaptation with Constrained Exploration (SPACE)
|
| 91 |
+
|
| 92 |
+
We now describe the proposed iterative algorithm, illustrated in Fig. 1. In what follows, $\pi^k$ denotes the learned policy after iteration $k$, and $M$ denotes a distance measure between policies. For example, $M$ may be the 2-norm of the difference of the policy parameters, or the KL-divergence between the action distributions averaged over states.
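As an illustration of the two choices of $M$ mentioned above, the sketch below computes (i) the 2-norm between flattened policy parameters and (ii) an average state-wise KL-divergence for diagonal Gaussian policies. This is a hedged example with assumed array shapes, not the implementation used by the authors.

```python
import numpy as np

def param_l2_distance(theta1, theta2):
    """2-norm of the difference between two flat parameter vectors."""
    return np.linalg.norm(theta1 - theta2)

def mean_gaussian_kl(mu1, std1, mu2, std2):
    """Average over sampled states of KL(N(mu1, std1^2) || N(mu2, std2^2)),
    assuming diagonal Gaussian action distributions (arrays of shape [states, act_dim])."""
    kl_per_dim = (np.log(std2 / std1)
                  + (std1 ** 2 + (mu1 - mu2) ** 2) / (2.0 * std2 ** 2)
                  - 0.5)
    return kl_per_dim.sum(axis=-1).mean()
```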
|
| 93 |
+
|
| 94 |
+
Step 1. We perform one step of trust region policy optimization (Schulman et al., 2015). This maximizes the reward advantage function $A_R^\pi(s, a)$ over a KL-divergence neighborhood of $\pi^k$ :
|
| 95 |
+
|
| 96 |
+
$$
\begin{aligned}
\pi^{k + \frac{1}{3}} = \ & \underset{\pi}{\arg\max}\ \mathbb{E}_{s \sim d^{\pi^{k}},\, a \sim \pi}\left[ A_{R}^{\pi^{k}}(s, a) \right] \\
& \text{s.t.}\ \ \mathbb{E}_{s \sim d^{\pi^{k}}}\left[ D_{\mathrm{KL}}\big(\pi(s) \,\|\, \pi^{k}(s)\big) \right] \leq \delta.
\end{aligned} \tag{4}
$$
|
| 99 |
+
|
| 100 |
+
Step 2. We project $\pi^{k + \frac{1}{3}}$ onto a region around $\pi_B$ controlled by $h_D^k$ to minimize $M$:
|
| 103 |
+
|
| 104 |
+
$$
\begin{aligned}
\pi^{k + \frac{2}{3}} = \ & \underset{\pi}{\arg\min}\ M\big(\pi, \pi^{k + \frac{1}{3}}\big) \\
& \text{s.t.}\ \ J_{D}\left(\pi^{k}\right) + \frac{1}{1 - \gamma}\, \mathbb{E}_{s \sim d^{\pi^{k}},\, a \sim \pi}\left[ A_{D}^{\pi^{k}}(s) \right] \leq h_{D}^{k}.
\end{aligned} \tag{5}
$$
|
| 107 |
+
|
| 108 |
+
Step 3. We project $\pi^{k + \frac{2}{3}}$ onto the set of policies satisfying the cost constraint to minimize $M$ :
|
| 109 |
+
|
| 110 |
+
$$
\begin{aligned}
\pi^{k + 1} = \ & \underset{\pi}{\arg\min}\ M\big(\pi, \pi^{k + \frac{2}{3}}\big) \\
& \text{s.t.}\ \ J_{C}\left(\pi^{k}\right) + \frac{1}{1 - \gamma}\, \mathbb{E}_{s \sim d^{\pi^{k}},\, a \sim \pi}\left[ A_{C}^{\pi^{k}}(s, a) \right] \leq h_{C}.
\end{aligned} \tag{6}
$$
|
| 113 |
+
|
| 114 |
+
Remarks. Since we use a small step size $\delta$ , we can replace the state distribution $d^{\pi}$ with $d^{\pi^k}$ in Eq. (5) and (6) and hence compute $A_D^{\pi^k}$ and $A_C^{\pi^k}$ . Please see the supplementary material for the derivation of this approximation.
|
| 115 |
+
|
| 116 |
+
Control $h_D^k$ in Step 2. We select $h_D^0$ to be small and gradually increase $h_D^k$ at each iteration to expand the region around $\pi_B$ . Specifically, we make $h_D^{k+1} > h_D^k$ if:
|
| 117 |
+
|
| 118 |
+
(a) $J_{C}(\pi^{k}) > J_{C}(\pi^{k - 1})$ : this increase is to ensure a nonempty intersection between the region around $\pi_B$ and the cost constraint set (feasibility). See Fig. 1(b).
|
| 119 |
+
(b) $J_{R}(\pi^{k}) < J_{R}(\pi^{k - 1})$ : this increase gives the next policy more freedom to improve the reward and the cost constraint performance (exploration). See Fig. 1(c).
|
| 120 |
+
|
| 121 |
+
It remains to determine how to set the new value of $h_D^{k+1}$ . Let $\mathcal{U}_1$ denote the set of policies satisfying the cost constraint, and $\mathcal{U}_2^k$ denote the set of policies in the region around $\pi_B$ controlled by $h_D^k$ . Then we have the following Lemma.
|
| 122 |
+
|
| 123 |
+
Lemma 4.1 (Updating $h_D$ ). If at step $k + 1$ : $h_D^{k + 1} \geq \mathcal{O}\big((J_C(\pi^k) - h_C)^2\big) + h_D^k$ , then $\mathcal{U}_1 \cap \mathcal{U}_2^{k + 1} \neq \emptyset$ (feasibility) and $\mathcal{U}_2^{k + 1} \cap \partial \mathcal{U}_1 \neq \emptyset$ (exploration).
|
| 124 |
+
|
| 125 |
+
Proof. Proved by Three-point Lemma (Chen & Teboulle, 1993). See the supplementary material for more details. $\square$
|
| 126 |
+
|
| 127 |
+
Remarks. The constant hidden in the big $\mathcal{O}$ involves two quantities: the first depends on the discount factor $\gamma$, and the second depends on the relative distances between $\pi^k$, $\pi_B$, and the policy on $\partial \mathcal{U}_1$. The intuition is that the smaller these distances are, the smaller the update of $h_D^k$ is.
|
| 128 |
+
|
| 129 |
+
Importantly, Lemma 4.1 ensures that the boundaries of the region around $\pi_B$ determined by $h_D$ and the set of policies satisfying the cost constraint intersect. Note that $h_D$ will become large enough to guarantee feasibility during training. This adaptable constraint set, in contrast to the fixed constraint set in PCPO, allows the learning algorithm to explore policies within the cost constraint set while still learning from the baseline policy. Compared to other CMDP approaches, the step of projecting close to $\pi_B$ allows the policy to improve quickly.
|
| 130 |
+
|
| 131 |
+
# Algorithm 1 SPACE
|
| 132 |
+
|
| 133 |
+
Initialize a policy $\pi^0 = \pi (\cdot |\pmb{\theta}^0)$ and a trajectory buffer $\mathcal{B}$

For $k = 0,1,2,\dots$ do:

- Run $\pi^k = \pi (\cdot |\pmb{\theta}^k)$ and store trajectories in $\mathcal{B}$
- Obtain $\pmb{\theta}^{k + 1}$ using the update in Eq. (10)
- If $J_{C}(\pi^{k}) > J_{C}(\pi^{k - 1})$ or $J_{R}(\pi^{k}) < J_{R}(\pi^{k - 1})$, update $h_D^{k+1}$ using Lemma 4.1
- Empty $\mathcal{B}$
|
| 144 |
+
|
| 145 |
+
Compared to behavior cloning, the steps of reward optimization and constraint projection allow the policy to achieve good final performance. We examine the importance of updating $h_D$ in Section 6.
|
| 146 |
+
|
| 147 |
+
# 5. A Theoretical Analysis of SPACE
|
| 148 |
+
|
| 149 |
+
We will implement a policy as a neural network with fixed architecture parameterized by $\pmb{\theta} \in \mathbb{R}^n$ . We then learn a policy from the achievable set $\{\pi(\cdot|\pmb{\theta}) \colon \pmb{\theta} \in \mathbb{R}^n\}$ by iteratively learning $\pmb{\theta}$ . Let $\pmb{\theta}^k$ and $\pi^k \doteq \pi(\cdot|\pmb{\theta}^k)$ denote the parameter value and the corresponding policy at step $k$ . In this setting, it is impractical to solve for the policy updates in Eq. (4), (5) and (6). Hence we approximate the reward function and constraints with first order Taylor expansions, and KL-divergence with a second order Taylor expansion. We will need the following derivatives:
|
| 150 |
+
|
| 151 |
+
(1) $\pmb{g}^{k}\doteq \nabla_{\pmb{\theta}}\mathbb{E}_{s\sim d^{\pi^{k}},a\sim \pi}[A_{R}^{\pi^{k}}(s,a)],$
|
| 152 |
+
(2) $\pmb{a}^{k}\doteq \nabla_{\pmb{\theta}}\mathbb{E}_{s\sim d^{\pi^{k}}, a\sim \pi}[A_{D}^{\pi^{k}}(s)],$
|
| 153 |
+
(3) $\pmb{c}^{k}\doteq \nabla_{\pmb{\theta}}\mathbb{E}_{s\sim d^{\pi^{k}}, a\sim \pi}[A_{C}^{\pi^{k}}(s,a)]$, and
|
| 154 |
+
(4) $\pmb{F}^{k}\doteq \nabla_{\pmb{\theta}}^{2}\mathbb{E}_{s\sim d^{\pi^{k}}}\left[D_{\mathrm{KL}}(\pi (s)\| \pi^{k}(s))\right].$
|
| 155 |
+
|
| 156 |
+
Each of these derivatives is taken w.r.t. the neural network parameters and evaluated at $\pmb{\theta}^k$. We also define $b^k \doteq J_D(\pi^k) - h_D^k$ and $d^k \doteq J_C(\pi^k) - h_C$. Let $u^k \doteq \sqrt{\frac{2\delta}{\pmb{g}^{kT}(\pmb{F}^{k})^{-1}\pmb{g}^{k}}}$, and let $\pmb{L} = \pmb{I}$ for the 2-norm projection and $\pmb{L} = \pmb{F}^k$ for the KL-divergence projection.
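As a small illustration of these auxiliary quantities (with hypothetical array shapes), $b^k$, $d^k$, and the step size $u^k$ can be computed as below. A full implementation would typically form Fisher-vector products with conjugate gradient rather than materializing $\boldsymbol{F}^k$; this sketch inverts it directly for clarity.

```python
import numpy as np

def auxiliary_quantities(g, F, J_D_k, h_D_k, J_C_k, h_C, delta):
    """Compute the slacks b^k, d^k and the maximal step size u^k."""
    b = J_D_k - h_D_k                                   # divergence-constraint slack
    d = J_C_k - h_C                                     # cost-constraint slack
    u = np.sqrt(2.0 * delta / (g @ np.linalg.solve(F, g)))
    return b, d, u
```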
|
| 157 |
+
|
| 158 |
+
Step 1. Approximating Eq. (4) yields
|
| 159 |
+
|
| 160 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + \frac{1}{3}} = \ & \underset{\boldsymbol{\theta}}{\arg\max}\ \boldsymbol{g}^{kT}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) \\
& \text{s.t.}\ \ \tfrac{1}{2}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k})^{T} \boldsymbol{F}^{k} (\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) \leq \delta.
\end{aligned} \tag{7}
$$
|
| 163 |
+
|
| 164 |
+
Step 2 & 3. Approximating Eq. (5) and (6), similarly yields
|
| 165 |
+
|
| 166 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + \frac{2}{3}} = \ & \underset{\boldsymbol{\theta}}{\arg\min}\ \tfrac{1}{2}\big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{1}{3}}\big)^{T} \boldsymbol{L} \big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{1}{3}}\big) \\
& \text{s.t.}\ \ \boldsymbol{a}^{kT}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) + b^{k} \leq 0,
\end{aligned} \tag{8}
$$
|
| 169 |
+
|
| 170 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + 1} = \ & \underset{\boldsymbol{\theta}}{\arg\min}\ \tfrac{1}{2}\big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{2}{3}}\big)^{T} \boldsymbol{L} \big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{2}{3}}\big) \\
& \text{s.t.}\ \ \boldsymbol{c}^{kT}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) + d^{k} \leq 0,
\end{aligned} \tag{9}
$$
|
| 173 |
+
|
| 174 |
+
where $\pmb{L} = \pmb{I}$ for the 2-norm projection and $\pmb{L} = \pmb{F}^{k}$ for the KL-divergence projection. Solving these problems with convex programming gives the following update, where $(\cdot)^{+}$ denotes $\max(0,\cdot)$:
|
| 177 |
+
|
| 178 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + 1} = \boldsymbol{\theta}^{k} & + u^{k} (\boldsymbol{F}^{k})^{-1} \boldsymbol{g}^{k} \\
& - \left( \frac{u^{k} \boldsymbol{a}^{kT} (\boldsymbol{F}^{k})^{-1} \boldsymbol{g}^{k} + b^{k}}{\boldsymbol{a}^{kT} \boldsymbol{L}^{-1} \boldsymbol{a}^{k}} \right)^{+} \boldsymbol{L}^{-1} \boldsymbol{a}^{k} \\
& - \left( \frac{u^{k} \boldsymbol{c}^{kT} (\boldsymbol{F}^{k})^{-1} \boldsymbol{g}^{k} + d^{k}}{\boldsymbol{c}^{kT} \boldsymbol{L}^{-1} \boldsymbol{c}^{k}} \right)^{+} \boldsymbol{L}^{-1} \boldsymbol{c}^{k}.
\end{aligned} \tag{10}
$$
|
| 181 |
+
|
| 182 |
+
Algorithm 1 shows the corresponding pseudocode.
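A minimal NumPy sketch of the closed-form update in Eq. (10) is given below, shown for the KL-divergence projection (i.e. $\pmb{L} = \pmb{F}^k$). It solves with the Fisher matrix directly for clarity; a practical implementation would instead use conjugate gradient for the required matrix-vector products, and the variable names here are illustrative rather than taken from the authors' code.

```python
import numpy as np

def space_update(theta, g, a, c, F, b, d, delta):
    """One SPACE parameter update (Eq. 10), KL-projection variant (L = F).

    theta: current parameters theta^k (flat vector)
    g, a, c: gradients of the reward, divergence, and cost surrogates
    F: Fisher information matrix at theta^k
    b = J_D(pi^k) - h_D^k,  d = J_C(pi^k) - h_C,  delta: trust-region size
    """
    F_inv_g = np.linalg.solve(F, g)
    F_inv_a = np.linalg.solve(F, a)
    F_inv_c = np.linalg.solve(F, c)

    u = np.sqrt(2.0 * delta / (g @ F_inv_g))            # step size u^k
    step = u * F_inv_g                                   # reward-improvement step

    lam_a = max(0.0, (u * (a @ F_inv_g) + b) / (a @ F_inv_a))   # divergence projection
    lam_c = max(0.0, (u * (c @ F_inv_g) + d) / (c @ F_inv_c))   # cost projection

    return theta + step - lam_a * F_inv_a - lam_c * F_inv_c
```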
|
| 183 |
+
|
| 184 |
+
Convergence analysis. We consider the following simplified problem to provide a convergence guarantee of SPACE:
|
| 185 |
+
|
| 186 |
+
$$
|
| 187 |
+
\min _ {\boldsymbol {\theta} \in \mathcal {C} _ {1} \cap \mathcal {C} _ {2}} f (\boldsymbol {\theta}), \tag {11}
|
| 188 |
+
$$
|
| 189 |
+
|
| 190 |
+
where $f: \mathbb{R}^n \to \mathbb{R}$ is twice continuously differentiable at every point of an open set $\mathcal{X} \subseteq \mathbb{R}^n$, and $\mathcal{C}_1 \subseteq \mathcal{X}$ and $\mathcal{C}_2 \subseteq \mathcal{X}$ are compact convex sets with $\mathcal{C}_1 \cap \mathcal{C}_2 \neq \emptyset$. The function $f$ is the negative reward function of our CMDP, and the two constraint sets represent the cost constraint set and the region around the baseline policy $\pi_B$.
|
| 191 |
+
|
| 192 |
+
For a vector $\pmb{x}$ , let $\| \pmb{x} \|$ denote the Euclidean norm. For a matrix $M$ let $\| M \|$ denote the induced matrix 2-norm, and $\sigma_{i}(M)$ denote the $i$ -th largest singular value of $M$ .
|
| 193 |
+
|
| 194 |
+
Assumption 1. We assume:
|
| 195 |
+
|
| 196 |
+
(1.1) The gradient $\nabla f$ is $L$-Lipschitz continuous over an open set $\mathcal{X}$.
|
| 197 |
+
(1.2) For some constant $G$ , $\| \nabla f(\pmb{\theta}) \| \leq G$ .
|
| 198 |
+
(1.3) For a constant $H$ , $\mathrm{diam}(\mathcal{C}_1) \leq H$ and $\mathrm{diam}(\mathcal{C}_2) \leq H$ .
|
| 199 |
+
|
| 200 |
+
Assumptions (1.1) and (1.2) ensure that the gradient cannot change too rapidly and that its norm cannot be too large. Assumption (1.3) implies that, at every iteration, the diameter of the region around $\pi_B$ is bounded above by $H$.
|
| 201 |
+
|
| 202 |
+
We will need the concept of an $\epsilon$-first order stationary point (Mokhtari et al., 2018). For $\epsilon > 0$, we say that $\pmb{\theta}^{*} \in \mathcal{C}_{1} \cap \mathcal{C}_{2}$ is an $\epsilon$-first order stationary point ($\epsilon$-FOSP) of Problem (11) under the KL-divergence projection if
|
| 203 |
+
|
| 204 |
+
$$
|
| 205 |
+
\nabla f \left(\boldsymbol {\theta} ^ {*}\right) ^ {T} \left(\boldsymbol {\theta} - \boldsymbol {\theta} ^ {*}\right) \geq - \epsilon , \quad \forall \boldsymbol {\theta} \in \mathcal {C} _ {1} \cap \mathcal {C} _ {2}. \tag {12}
|
| 206 |
+
$$
|
| 207 |
+
|
| 208 |
+
Similarly, under the 2-norm projection, $\pmb{\theta}^{*}\in \mathcal{C}_{1}\cap \mathcal{C}_{2}$ is an $\epsilon$-FOSP of (11) if
|
| 209 |
+
|
| 210 |
+
$$
|
| 211 |
+
\nabla f \left(\boldsymbol {\theta} ^ {*}\right) ^ {T} \boldsymbol {F} ^ {*} \left(\boldsymbol {\theta} - \boldsymbol {\theta} ^ {*}\right) \geq - \epsilon , \quad \forall \boldsymbol {\theta} \in \mathcal {C} _ {1} \cap \mathcal {C} _ {2}, \tag {13}
|
| 212 |
+
$$
|
| 213 |
+
|
| 214 |
+
where $F^{*}\doteq \nabla_{\pmb{\theta}}^{2}\mathbb{E}_{s\sim d^{\pi^{*}}}[D_{\mathrm{KL}}(\pi (s)\| \pi^{*}(s))]$ . Notice that SPACE converges to distinct stationary points under the two possible projections (see the supplementary material). With these assumptions, we have the following Theorem.
|
| 215 |
+
|
| 216 |
+
Theorem 5.1 (Finite-Time Convergence Guarantee of SPACE). Under the KL-divergence projection, there exists a sequence $\{\eta^k\}$ such that SPACE converges to an $\epsilon$-FOSP in at most $\mathcal{O}(\epsilon^{-2})$ iterations. Moreover, at step $k + 1$,
|
| 219 |
+
|
| 220 |
+
$$
|
| 221 |
+
f \left(\boldsymbol {\theta} ^ {k + 1}\right) \leq f \left(\boldsymbol {\theta} ^ {k}\right) - \frac {L \epsilon^ {2}}{2 \left(G + \frac {H \sigma_ {1} \left(\boldsymbol {F} ^ {k}\right)}{\eta^ {k}}\right) ^ {2}}. \tag {14}
|
| 222 |
+
$$
|
| 223 |
+
|
| 224 |
+
Similarly, under the 2-norm projection, there exists a sequence $\{\eta^k\}$ such that SPACE converges to an $\epsilon$ -FOSP in at most $\mathcal{O}(\epsilon^{-2})$ iterations. Moreover, at step $k + 1$
|
| 225 |
+
|
| 226 |
+
$$
f\left(\boldsymbol{\theta}^{k + 1}\right) \leq f\left(\boldsymbol{\theta}^{k}\right) - \frac{L \epsilon^{2}}{2\left(G\, \sigma_{1}\!\big((\boldsymbol{F}^{k})^{-1}\big) + \frac{H}{\eta^{k}}\right)^{2}}. \tag{15}
$$
|
| 229 |
+
|
| 230 |
+
Proof. The proof and the sequence $\{\eta^k\}$ are given in the supplementary material.
|
| 231 |
+
|
| 232 |
+
We now make several observations for Theorem 5.1.
|
| 233 |
+
|
| 234 |
+
(1) The smaller $H$ is, the greater the decrease in the objective. This observation supports the idea of starting with a small value for $h_D$ and increasing it only when needed.
|
| 235 |
+
(2) Under the KL-divergence projection, the effect of $\sigma_{1}(\pmb{F}^{k})$ is negligible. This is because in this case $\eta^k$ is proportional to $\sigma_{1}(\pmb{F}^{k})$ . Hence $\sigma_{1}(\pmb{F}^{k})$ does not play a major role in decreasing the objective value.
|
| 236 |
+
(3) Under the 2-norm projection, the smaller $\sigma_1\big((\boldsymbol{F}^{k})^{-1}\big)$ (i.e., the larger $\sigma_n(\boldsymbol{F}^k)$) is, the greater the decrease in the objective. This is because a large $\sigma_n(\boldsymbol{F}^k)$ means a large curvature of $f$ in all directions, which implies that the 2-norm distance between the pre-projection and post-projection points is small, leading to a small deviation from the reward-improvement direction after the projections.
|
| 237 |
+
|
| 238 |
+
Comparison to Yang et al. (2020). Our work is inspired by PCPO (Yang et al., 2020), which also uses projections to ensure constraint satisfaction during policy learning. However, there are a few key differences between our work and PCPO. (1) Algorithm. PCPO does not have the capability to safely exploit a baseline policy, which makes it less sample efficient in cases when we have demonstrations or teacher agents. In addition, SPACE's update dynamically sets distances between policies while PCPO does not—this update is important to effectively and safely learn from the baseline policy. (2) Theory. Our analysis provides a safety guarantee to ensure the feasibility of the optimization problem while Yang et al. (2020) do not. Merely adding an IL objective in the reward objective of PCPO cannot make the agent learn efficiently, as shown in our experiments (Section 6.2). In addition, compared to the analysis in Yang et al. (2020), Theorem 5.1 shows the existence of the step size for each iteration. (3) Problem. Finally, our work tackles a different problem compared to PCPO (which only ensures safety). We focus on how to safely and efficiently learn from an existing baseline policy, which is more conducive to practical applications of safe RL.
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
(a) Gather. (b) Circle. (c) Grid. (d) Bottleneck. (e) Car-racing. (f) Demo.

Figure 2. (a) Gather: the agent is rewarded for gathering green apples, but is constrained to collect a limited number of red apples (Achiam et al., 2017). (b) Circle: the agent is rewarded for moving in a specified wide circle, but is constrained to stay within a safe region smaller than the radius of the circle (Achiam et al., 2017). (c) Grid: the agent controls the traffic lights in a grid road network and is rewarded for high throughput, but is constrained to let lights stay red for at most 7 consecutive seconds (Vinitsky et al., 2018). (d) Bottleneck: the agent controls a set of autonomous vehicles (shown in red) in a traffic merge situation and is rewarded for achieving high throughput, but constrained to ensure that human-driven vehicles (shown in white) have low speed for no more than 10 seconds (Vinitsky et al., 2018). (e) Car-racing: the agent controls an autonomous vehicle on a race track and is rewarded for driving through as many tiles as possible, but is constrained to use the brakes at most 5 times to encourage a smooth ride (Brockman et al., 2016). (f) A human player plays car-racing with demonstration data logged. These tasks are to show the applicability of our approach to a diverse set of problems.
|
| 258 |
+
|
| 259 |
+
# 6. Experiments
|
| 260 |
+
|
| 261 |
+
Our experiments study the following three questions: (1) How does SPACE compare to behavior cloning and safe RL baselines in terms of learning efficiency and constraint satisfaction? (2) How does SPACE perform when trained with a sub-optimal $\pi_B$ (e.g., from human demonstration)? (3) How does Step 2 in SPACE affect the performance?
|
| 262 |
+
|
| 263 |
+
# 6.1. Setup
|
| 264 |
+
|
| 265 |
+
Tasks. We compare the proposed algorithm with existing approaches on five control tasks: three tasks with safety constraints ((a), (b) and (e) in Fig. 2), and two tasks with fairness constraints ((c) and (d) in Fig. 2). These tasks are briefly described in the caption of Fig. 2. We chose the traffic management tasks since a good control policy can benefit millions of drivers. In addition, we chose the car-racing task since a good algorithm should safely learn from baseline human policies. For all the algorithms, we use neural networks to represent Gaussian policies. We use the KL-divergence projection in the MuJoCo and car-racing tasks, and the 2-norm projection in the traffic management tasks, since these choices achieve better performance. We use a grid search to select the hyper-parameters. See the supplementary material for more experimental details.
|
| 266 |
+
|
| 267 |
+
Baseline policies $\pi_{B}$ . To test whether SPACE can safely and efficiently leverage the baseline policy, we consider three variants of the baseline policies.
|
| 268 |
+
|
| 269 |
+
(1) Sub-optimal $\pi_B^{\mathrm{cost}}$ with $J_{C}(\pi_{B}^{\mathrm{cost}})\approx 0$
|
| 270 |
+
(2) Sub-optimal $\pi_B^{\mathrm{reward}}$ with $J_{C}(\pi_{B}^{\mathrm{reward}}) > h_{C}$ .
|
| 271 |
+
(3) $\pi_B^{\mathrm{near}}$ with $J_{C}(\pi_{B}^{\mathrm{near}})\approx h_{C}$ (i.e., the baseline policy has the same cost constraint as the agent, but is not guaranteed to have an optimal reward performance).
|
| 272 |
+
|
| 273 |
+
These $\pi_B$ have different degrees of constraint satisfaction.
|
| 274 |
+
|
| 275 |
+
This is to examine whether SPACE can safely learn from sub-optimal $\pi_B$ . In addition, in the car-racing task we pretrain $\pi_B$ using an off-policy algorithm (DDPG (Lillicrap et al., 2016)), which directly learns from human demonstration data (Fig. 2(f)). This is to demonstrate that $\pi_B$ may come from a teacher or demonstration data. This suboptimal human baseline policy is denoted by $\pi_B^{\mathrm{human}}$ .
|
| 276 |
+
|
| 277 |
+
For ease of computation, we update $h_D$ using $v \cdot (J_C(\pi^k) - h_C)^2 + h_D^k$ from Lemma 4.1, with a constant $v > 0$. We found that the performance is not very sensitive to $v$, since $h_D$ is only updated in later iterations. Ablation studies of $v$ can be found in Appendix E.1.
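The practical update rule above amounts to a one-line change per iteration. A sketch is shown below; the default value of $v$ is illustrative only.

```python
def update_h_D(h_D, J_C_curr, J_C_prev, J_R_curr, J_R_prev, h_C, v=1.0):
    """Expand the divergence threshold when the cost rises or the reward drops,
    using the simplified rule h_D <- h_D + v * (J_C(pi^k) - h_C)^2 from Lemma 4.1."""
    if J_C_curr > J_C_prev or J_R_curr < J_R_prev:
        h_D = h_D + v * (J_C_curr - h_C) ** 2
    return h_D
```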
|
| 278 |
+
|
| 279 |
+
**Baseline algorithms.** Our goal is to study how to safely and efficiently learn from sub-optimal (possibly unsafe) baseline policies. We compare SPACE with five baseline methods that combine behavior cloning and safe RL algorithms.
|
| 280 |
+
|
| 281 |
+
(1) Fixed-point Constrained Policy Optimization (f-CPO). In f-CPO, we add the divergence objective to the reward function with a fixed weight $\lambda$, followed by a CPO update (optimize the reward and divergence cost subject to the trust region and the cost constraints). The f-CPO policy update solves (Achiam et al., 2017):
|
| 282 |
+
|
| 283 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + 1} = \ & \underset{\boldsymbol{\theta}}{\arg\max}\ \left(\boldsymbol{g}^{k} + \lambda \boldsymbol{a}^{k}\right)^{T}\left(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}\right) \\
& \text{s.t.}\ \ \tfrac{1}{2}\left(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}\right)^{T} \boldsymbol{F}^{k}\left(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}\right) \leq \delta \\
& \phantom{\text{s.t.}\ \ } \boldsymbol{c}^{kT}\left(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}\right) + d^{k} \leq 0.
\end{aligned}
$$
|
| 294 |
+
|
| 295 |
+
(2) Fixed-point PCPO (f-PCPO). In f-PCPO, we add the divergence objective to the reward function with a fixed weight $\lambda$, followed by a PCPO update (a two-step process: optimize the reward and divergence cost, then project onto the safe set).
|
| 296 |
+
|
| 297 |
+

|
| 298 |
+
Figure 3. The discounted reward, the undiscounted constraint cost, and the undiscounted divergence cost over policy updates for the tested algorithms and tasks. The solid line is the mean and the shaded area is the standard deviation over 5 runs (random seeds). The baseline policies in the grid and bottleneck tasks are $\pi_B^{\mathrm{near}}$, and the baseline policy in the car-racing task is $\pi_B^{\mathrm{human}}$. The black dashed line is the cost constraint threshold $h_C$. We observe that SPACE is the only algorithm that satisfies the constraints while achieving superior reward performance. Although $\pi_B^{\mathrm{human}}$ has a substantially lower reward, SPACE can still learn to improve the reward. (We show the results for these tasks as representative cases since they are more challenging; see the Appendix for more results. Best viewed in color.)
|
| 299 |
+
|
| 300 |
+
The f-PCPO policy update solves:
|
| 301 |
+
|
| 302 |
+
$$
\begin{aligned}
\boldsymbol{\theta}^{k + \frac{1}{2}} = \ & \underset{\boldsymbol{\theta}}{\arg\max}\ \left(\boldsymbol{g}^{k} + \lambda \boldsymbol{a}^{k}\right)^{T}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) \\
& \text{s.t.}\ \ \tfrac{1}{2}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k})^{T} \boldsymbol{F}^{k}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) \leq \delta \quad \text{(trust region)} \\[2pt]
\boldsymbol{\theta}^{k + 1} = \ & \underset{\boldsymbol{\theta}}{\arg\min}\ \tfrac{1}{2}\big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{1}{2}}\big)^{T} \boldsymbol{L}\big(\boldsymbol{\theta} - \boldsymbol{\theta}^{k + \frac{1}{2}}\big) \\
& \text{s.t.}\ \ \boldsymbol{c}^{kT}(\boldsymbol{\theta} - \boldsymbol{\theta}^{k}) + d^{k} \leq 0. \quad \text{(cost constraint)}
\end{aligned}
$$
|
| 313 |
+
|
| 314 |
+
(3) Dynamic-point Constrained Policy Optimization (d-CPO). The d-CPO update solves the f-CPO problem with a stateful $\lambda^{k + 1} = (\lambda^k)^\beta$, where $0 < \beta < 1$. This is inspired by Rajeswaran et al. (2017), who use the same weight-scheduling method to adjust $\lambda^k$.
|
| 315 |
+
(4) Dynamic-point PCPO (d-PCPO). The d-PCPO update solves the f-PCPO problem with a stateful $\lambda^{k + 1} = (\lambda^k)^\beta$, where $0 < \beta < 1$.
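The stateful weight schedule used by d-CPO and d-PCPO is a one-line rule; a minimal sketch with an illustrative $\beta$ is shown below.

```python
def next_weight(lam, beta=0.9):
    """d-CPO / d-PCPO weight schedule: lambda^{k+1} = (lambda^k)^beta, with 0 < beta < 1."""
    return lam ** beta
```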
|
| 318 |
+
|
| 319 |
+
For all the experiments and algorithms, the weight is set to 1. Note that both d-CPO and d-PCPO regularize the standard RL objective with the distance to the baseline policy and let the regularization parameter (i.e., $\lambda$) fade over time, which is a common practice for learning from a baseline policy. In addition, in many real applications one does not have access to a parameterized $\pi_B$ (e.g., a neural network policy), or one may want to design a policy with a different architecture than $\pi_B$. Hence, in our setting we cannot directly initialize the learning policy with the baseline policy and then fine-tune it.
|
| 320 |
+
|
| 321 |
+
# 6.2. Results
|
| 322 |
+
|
| 323 |
+
Overall performance.
|
| 324 |
+
|
| 325 |
+

|
| 326 |
+
|
| 327 |
+

|
| 328 |
+
|
| 329 |
+

|
| 330 |
+
|
| 331 |
+

|
| 332 |
+
|
| 333 |
+

|
| 334 |
+
Figure 4. Learning from sub-optimal $\pi_B$ . The undiscounted constraint cost and the discounted reward over policy updates for the gather and the circle tasks. The solid line is the mean and the shaded area is the standard deviation over 5 runs. The black dashed line is the cost constraint threshold $h_C$ . We observe that SPACE satisfies the cost constraints even when learning from the sub-optimal $\pi_B$ .
|
| 335 |
+
|
| 336 |
+

|
| 337 |
+
Figure 5. Ablation studies on the fixed $h_D$ . The undiscounted constraint cost and the discounted reward over policy updates for the gather and the circle tasks. The solid line is the mean and the shaded area is the standard deviation over 5 runs. The black dashed line is the cost constraint threshold $h_C$ . We observe that the update rule is critical for ensuring the learning performance improvement.
|
| 338 |
+
|
| 339 |
+

|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
|
| 343 |
+
The learning curves of the discounted reward $(J_{R}(\pi))$, the undiscounted constraint cost $(J_{C}(\pi))$, and the undiscounted divergence cost $(J_{D}(\pi))$ over policy updates are shown for all tested algorithms and tasks in Fig. 3. We use $\pi_B^{\mathrm{near}}$ in the bottleneck and grid tasks, and $\pi_B^{\mathrm{human}}$ in the car-racing task. Note that $\pi_B^{\mathrm{human}}$, obtained from human demonstration, is highly sub-optimal for the agent (i.e., $J_{R}(\pi_{B}^{\mathrm{human}})$ is small); its reward is only around 5, as shown in the plot, so it does not solve the task at hand. Overall, we observe that (1) SPACE achieves at least 2 times faster cost constraint satisfaction in all cases, even when learning from $\pi_B^{\mathrm{human}}$, (2) SPACE achieves at least $10\%$ more reward in the bottleneck and car-racing tasks compared to the best baseline, and (3) SPACE is the only algorithm that satisfies the cost constraints in all cases. In contrast, even when f(d)-CPO and f(d)-PCPO (similar to behavior cloning) are provided with good baseline policies $\pi_B^{\mathrm{near}}$, they do not learn efficiently due to the conflicting reward and cost objectives. In addition, PCPO is less sample-efficient, which highlights the accelerated learning of SPACE.
|
| 344 |
+
|
| 345 |
+
For example, in the car-racing task we observe that $J_{D}(\pi)$ in SPACE decreases in the initial iterations, but increases toward the end. This implies that the learned policy is guided by the baseline policy $\pi_B^{\mathrm{human}}$ in the beginning, but uses less supervision in the end. In addition, in the grid task we observe that the final reward of SPACE is lower than that of the baseline algorithms. This is because SPACE converges to a policy in the cost constraint set, whereas the baseline algorithms do not find constraint-satisfying policies. Furthermore, we observe that $J_{D}(\pi)$ in the traffic tasks decreases throughout training. This implies that SPACE intelligently adjusts $h_{D}^{k}$ w.r.t. the performance of $\pi_{B}$ to achieve safe learning.
|
| 348 |
+
|
| 349 |
+
f-CPO and f-PCPO. f-CPO and f-PCPO fail to improve the reward and incur more cost violations. Most likely this is due to the persistent supervision from the baseline policies, which need not satisfy the cost constraints nor have high reward. For example, in the car-racing task we observe that the value of the divergence cost decreases throughout training. This implies that the learned policy drifts too close to the sub-optimal $\pi_B$ and hence degrades the reward performance.
|
| 350 |
+
|
| 351 |
+
d-CPO and d-PCPO. d-CPO and d-PCPO improve the reward slowly and have more cost violations. They do not use projection to quickly learn from $\pi_B$ . For example, in the car-racing task $J_{D}(\pi)$ in d-CPO and d-PCPO are high compared to SPACE throughout the training. This suggests
|
| 352 |
+
|
| 353 |
+
that simply regularizing the RL objective with a fading weight is susceptible to a sub-optimal $\pi_B$. In contrast to this heuristic, we use Lemma 4.1 to update $h_D$ when needed, allowing $\pi_B$ to influence the learning of the agent at any iteration, depending on the agent's learning progress.
|
| 354 |
+
|
| 355 |
+
Importantly, in our setup the agent does not have any prior knowledge about $\pi_B$ . The agent has to stay close to $\pi_B$ to verify its reward and cost performance. It is true that $\pi_B$ may be constraint-violating, but it may also provide a useful signal for maximizing the reward. For example, in the grid task (Fig. 3), although $\pi_B$ does not satisfy the cost constraint, it still helps the SPACE agent (by being close to $\pi_B$ ) to achieve faster cost satisfaction.
|
| 356 |
+
|
| 357 |
+
Having demonstrated the overall effectiveness of SPACE, our remaining experiments explore (1) SPACE's ability to safely learn from sub-optimal policies, and (2) the importance of the update method in Lemma 4.1. For compactness, we restrict our attention to SPACE and the MuJoCo tasks, which are widely used in the RL community.
|
| 358 |
+
|
| 359 |
+
Sub-optimal $\pi_B^{\mathrm{cost}}$ and $\pi_B^{\mathrm{reward}}$. Next, we test whether SPACE can learn from sub-optimal $\pi_B$. The learning curves of $J_{C}(\pi)$ and $J_{R}(\pi)$ over policy updates are shown for the gather and circle tasks in Fig. 4. We use two sub-optimal baselines, $\pi_B^{\mathrm{cost}}$ and $\pi_B^{\mathrm{reward}}$, and the learning agent's $h_C$ is set to 0.5 (i.e., the baseline policies do not solve the task at hand). We observe that SPACE robustly satisfies the cost constraints in all cases, even when learning from $\pi_B^{\mathrm{reward}}$. In addition, we observe that learning guided by $\pi_B^{\mathrm{reward}}$ improves the reward faster in the initial iterations. This is because $J_{R}(\pi_{B}^{\mathrm{reward}}) > J_{R}(\pi_{B}^{\mathrm{cost}})$, as seen in the reward plot. Furthermore, we observe that learning guided by $\pi_B^{\mathrm{cost}}$ improves the reward faster in later iterations. This is because, by starting in the interior of the cost constraint set (i.e., $J_{C}(\pi_{B}^{\mathrm{cost}}) \approx 0 \leq h_{C}$), the agent can safely exploit the baseline policy. The results show that SPACE enables fast convergence to a constraint-satisfying policy, even if $\pi_B$ does not meet the constraint or does not optimize the reward.
|
| 360 |
+
|
| 361 |
+
SPACE with fixed $h_D$. In our final experiments, we investigate the importance of updating $h_D$ when learning from a sub-optimal $\pi_B$. The learning curves of $J_C(\pi)$ and $J_R(\pi)$ over policy updates are shown for the gather and circle tasks in Fig. 5. We observe that SPACE with a fixed $h_D$ converges to a lower reward. For example, in the circle task SPACE with the dynamic $h_D$ achieves 2.3 times more reward. This shows that $\pi_B$ in this task is highly sub-optimal for the agent and highlights the need for a stateful $h_D^k$.
|
| 362 |
+
|
| 363 |
+
Moreover, Fig. 6 shows the divergence cost $J_{D}(\pi)$ and the value of $h_{D}$ over the iterations in the car-racing task. We observe that SPACE gradually increases $h_{D}$ to improve reward and cost satisfaction performance.
|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
Figure 6. The divergence cost $J_{D}(\pi)$ and the value of $h_{D}$ over the iterations in the car-racing task. We see that SPACE controls $h_{D}$ to ensure divergence constraint satisfaction.
|
| 367 |
+
|
| 368 |
+
# 7. Conclusion
|
| 369 |
+
|
| 370 |
+
In this work, we addressed the problem of learning constraint-satisfying policies given potentially sub-optimal baseline policies. We explicitly analyzed how to safely learn from the baseline policy, and proposed an iterative policy optimization algorithm that alternates between maximizing expected return on the task, minimizing distance to the baseline policy, and projecting the policy onto the constraint-satisfying set. Our algorithm efficiently learns from a baseline policy as well as human-provided demonstration data, and achieves superior reward and cost performance compared with state-of-the-art approaches (i.e., PCPO).
|
| 371 |
+
|
| 372 |
+
No algorithm is without limitations, and future work could improve SPACE in several ways. For instance, Lemma 4.1 does not guarantee that SPACE will increase $h_D$ enough for the region around the baseline policy to contain the optimal policy. This is challenging since the optimization problem is non-convex. One possible solution is to rerun SPACE multiple times, reinitializing $\pi_B$ with the previously learned policy each time. One piece of evidence supporting this approach is that in the bottleneck task (Fig. 3), the agent trained with SPACE outperforms the PCPO agent by achieving higher reward and faster constraint satisfaction. The PCPO agent here can be seen as a SPACE agent trained without $\pi_B$; we then train the SPACE agent with $\pi_B$ set to the learned PCPO policy. This shows that, starting from a learned policy, we can use SPACE to further improve performance. In addition, it would be interesting to explore other types of baseline policies, such as rule-based policies, and see how they affect the learning dynamics of SPACE.
|
| 373 |
+
|
| 374 |
+
# Acknowledgements
|
| 375 |
+
|
| 376 |
+
The authors would like to thank members of the Princeton NLP Group, the anonymous reviewers, and the area chair for their comments. Tsung-Yen Yang thanks Siemens Corporation, Corporate Technology for their support.
|
| 377 |
+
|
| 378 |
+
# References
|
| 379 |
+
|
| 380 |
+
Abbeel, P., Coates, A., and Ng, A. Y. Autonomous helicopter aerobatics through apprenticeship learning. The International Journal of Robotics Research, 29(13):1608-1639, 2010.
|
| 381 |
+
Abel, D., Salvatier, J., Stuhlmüller, A., and Evans, O. Agent-agnostic human-in-the-loop reinforcement learning. arXiv preprint arXiv:1701.04079, 2017.
|
| 382 |
+
Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained policy optimization. In Precup, D. and Teh, Y. W. (eds.), Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pp. 22-31. PMLR, 2017.
|
| 383 |
+
Altman, E. Constrained Markov decision processes, volume 7. CRC Press, 1999.
|
| 384 |
+
Avni, G., Bloem, R., Chatterjee, K., Henzinger, T. A., Konighofer, B., and Pranger, S. Run-time optimization for learned controllers through quantitative games. In International Conference on Computer Aided Verification, pp. 630-649. Springer, 2019.
|
| 385 |
+
Balakrishna, A., Thananjeyan, B., Lee, J., Zahed, A., Li, F., Gonzalez, J. E., and Goldberg, K. On-policy robot imitation learning from a converging supervisor. In Proceedings of the Conference on Robot Learning, 2019.
|
| 386 |
+
Bartocci, E., Grosu, R., Katsaros, P., Ramakrishnan, C., and Smolka, S. A. Model repair for probabilistic systems. In International Conference on Tools and Algorithms for the Construction and Analysis of Systems, pp. 326-340. Springer, 2011.
|
| 387 |
+
Bharadhwaj, H., Kumar, A., Rhinehart, N., Levine, S., Shkurti, F., and Garg, A. Conservative safety critics for exploration. arXiv preprint arXiv:2010.14497, 2020.
|
| 388 |
+
Brockman, G., Cheung, V., Pettersson, L., Schneider, J., Schulman, J., Tang, J., and Zaremba, W. Openai gym. arXiv preprint arXiv:1606.01540, 2016.
|
| 389 |
+
Brown, D. S., Goo, W., Nagarajan, P., and Niekum, S. Extrapolating beyond suboptimal demonstrations via inverse reinforcement learning from observations. In Chaudhuri, K. and Salakhutdinov, R. (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 783-792. PMLR, 2019.
|
| 390 |
+
Chen, G. and Teboulle, M. Convergence analysis of a proximal-like minimization algorithm using bregman functions. SIAM Journal on Optimization, 3(3):538-543, 1993.
|
| 391 |
+
|
| 392 |
+
Chernova, S. and Thomaz, A. L. Robot learning from human teachers. Synthesis Lectures on Artificial Intelligence and Machine Learning, 8(3):1-121, 2014.
|
| 393 |
+
Chow, Y., Nachum, O., Duñez-Guzmán, E. A., and Ghavamzadeh, M. A lyapunov-based approach to safe reinforcement learning. In Bengio, S., Wallach, H. M., Larochelle, H., Grauman, K., Cesa-Bianchi, N., and Garnett, R. (eds.), Advances in Neural Information Processing Systems 31: Annual Conference on Neural Information Processing Systems 2018, NeurIPS 2018, December 3-8, 2018, Montréal, Canada, pp. 8103-8112, 2018.
|
| 394 |
+
Chow, Y., Nachum, O., Faust, A., Ghavamzadeh, M., and Duenez-Guzman, E. Lyapunov-based safe policy optimization for continuous control. arXiv preprint arXiv:1901.10031, 2019.
|
| 395 |
+
Dalal, G., Dvijotham, K., Vecerik, M., Hester, T., Paduraru, C., and Tassa, Y. Safe exploration in continuous action spaces. arXiv preprint arXiv:1801.08757, 2018.
|
| 396 |
+
Driessens, K. and Džeroski, S. Integrating guidance into relational reinforcement learning. Machine Learning, 57 (3):271-304, 2004.
|
| 397 |
+
Duan, Y., Chen, X., Houthooft, R., Schulman, J., and Abbeel, P. Benchmarking deep reinforcement learning for continuous control. In Balcan, M. and Weinberger, K. Q. (eds.), Proceedings of the 33nd International Conference on Machine Learning, ICML 2016, New York City, NY, USA, June 19-24, 2016, volume 48 of JMLR Workshop and Conference Proceedings, pp. 1329-1338. JMLR.org, 2016.
|
| 398 |
+
Fu, M. and Prashanth L, A. Risk-sensitive reinforcement learning: A constrained optimization viewpoint. arXiv preprint arXiv:1810.09126, 2018.
|
| 399 |
+
Fujimoto, S., Conti, E., Ghavamzadeh, M., and Pineau, J. Benchmarking batch deep reinforcement learning algorithms. arXiv preprint arXiv:1910.01708, 2019.
|
| 400 |
+
Gao, Y., Lin, J., Yu, F., Levine, S., and Darrell, T. Reinforcement learning from imperfect demonstrations. In Proceedings of the International Conference on Machine Learning, 2018.
|
| 401 |
+
Garcia, J. and Fernandez, F. Safe exploration of state and action spaces in reinforcement learning. Journal of Artificial Intelligence Research, 45:515-564, 2012.
|
| 402 |
+
Garcia, J. and Fernandez, F. A comprehensive survey on safe reinforcement learning. Journal of Machine Learning Research, 16(1):1437-1480, 2015.
|
| 403 |
+
|
| 404 |
+
Goodfellow, I. J., Pouget-Abadie, J., Mirza, M., Xu, B., Warde-Farley, D., Ozair, S., Courville, A. C., and Bengio, Y. Generative adversarial nets. In Ghahramani, Z., Welling, M., Cortes, C., Lawrence, N. D., and Weinberger, K. Q. (eds.), Advances in Neural Information Processing Systems 27: Annual Conference on Neural Information Processing Systems 2014, December 8-13 2014, Montreal, Quebec, Canada, pp. 2672-2680, 2014.
|
| 405 |
+
Hasanbeig, M., Abate, A., and Kroening, D. Cautious reinforcement learning with logical constraints. arXiv preprint arXiv:2002.12156, 2020.
|
| 406 |
+
Hester, T., Vecerik, M., Pietquin, O., Lanctot, M., Schaul, T., Piot, B., Horgan, D., Quan, J., Sendonaris, A., Osband, I., Dulac-Arnold, G., Agapiou, J. P., Leibo, J. Z., and Gruslys, A. Deep q-learning from demonstrations. In McIlraith, S. A. and Weinberger, K. Q. (eds.), Proceedings of the Thirty-Second AAAI Conference on Artificial Intelligence, (AAAI-18), the 30th innovative Applications of Artificial Intelligence (IAAI-18), and the 8th AAAI Symposium on Educational Advances in Artificial Intelligence (EAAI-18), New Orleans, Louisiana, USA, February 2-7, 2018, pp. 3223-3230. AAAI Press, 2018.
|
| 407 |
+
Ho, J. and Ermon, S. Generative adversarial imitation learning. In Lee, D. D., Sugiyama, M., von Luxburg, U., Guyon, I., and Garnett, R. (eds.), Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pp. 4565-4573, 2016.
|
| 408 |
+
Jansen, N., Könighofer, B., Junges, S., Serban, A., and Bloem, R. Safe Reinforcement Learning Using Probabilistic Shields (Invited Paper). In Konnov, I. and Kovács, L. (eds.), 31st International Conference on Concurrency Theory (CONCUR 2020), volume 171 of Leibniz International Proceedings in Informatics (LIPics), pp. 3:1-3:16, Dagstuhl, Germany, 2020. Schloss Dagstuhl-Leibniz-Zentrum für Informatik. ISBN 978-3-95977-160-3. doi: 10.4230/LIPics.CONCUR.2020.3.
|
| 409 |
+
Jaques, N., Ghandeharioun, A., Shen, J. H., Ferguson, C., Lapedriza, A., Jones, N., Gu, S., and Picard, R. Way off-policy batch deep reinforcement learning of implicit human preferences in dialog. arXiv preprint arXiv:1907.00456, 2019.
|
| 410 |
+
Junges, S., Jansen, N., Dehnert, C., Topcu, U., and Katoen, J.-P. Safety-constrained reinforcement learning for mdps. In International Conference on Tools and Algorithms for the Construction and Analysis of Systems, pp. 130-146. Springer, 2016.
|
| 411 |
+
Kakade, S. M. and Langford, J. Approximately optimal approximate reinforcement learning. In Sammut, C. and Hoffmann, A. G. (eds.), Machine Learning, Proceedings
|
| 412 |
+
|
| 413 |
+
of the Nineteenth International Conference (ICML 2002), University of New South Wales, Sydney, Australia, July 8-12, 2002, pp. 267-274. Morgan Kaufmann, 2002.
|
| 414 |
+
Koppejan, R. and Whiteson, S. Neuroevolutionary reinforcement learning for generalized control of simulated helicopters. Evolutionary intelligence, 4(4):219-241, 2011.
|
| 415 |
+
Kwon, M., Biyik, E., Talati, A., Bhasin, K., Losey, D. P., and Sadigh, D. When humans aren't optimal: Robots that collaborate with risk-aware humans. In Proceedings of ACM/IEEE International Conference on Human-Robot Interaction, 2020.
|
| 416 |
+
Le, H. M., Voloshin, C., and Yue, Y. Batch policy learning under constraints. In Chaudhuri, K. and Salakhutdinov, R. (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 3703-3712. PMLR, 2019.
|
| 417 |
+
Levine, S., Finn, C., Darrell, T., and Abbeel, P. End-to-end training of deep visuomotor policies. The Journal of Machine Learning Research, 17(1):1334-1373, 2016.
|
| 418 |
+
Lillicrap, T. P., Hunt, J. J., Pritzel, A., Heess, N., Erez, T., Tassa, Y., Silver, D., and Wierstra, D. Continuous control with deep reinforcement learning. In Bengio, Y. and LeCun, Y. (eds.), 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, 2016.
|
| 419 |
+
Mnih, V., Kavukcuoglu, K., Silver, D., Graves, A., Antonoglou, I., Wierstra, D., and Riedmiller, M. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602, 2013.
|
| 420 |
+
Mokhtari, A., Ozdaglar, A. E., and Jadbabaie, A. Escaping saddle points in constrained optimization. In Bengio, S., Wallach, H. M., Larochelle, H., Grauman, K., Cesa-Bianchi, N., and Garnett, R. (eds.), Advances in Neural Information Processing Systems 31: Annual Conference on Neural Information Processing Systems 2018, NeurIPS 2018, December 3-8, 2018, Montréal, Canada, pp. 3633-3643, 2018.
|
| 421 |
+
Mülling, K., Kober, J., Kroemer, O., and Peters, J. Learning to select and generalize striking movements in robot table tennis. The International Journal of Robotics Research, 32(3):263-279, 2013.
|
| 422 |
+
Pathak, S., Ábráham, E., Jansen, N., Tacchella, A., and Katoen, J.-P. A greedy approach for the efficient repair of stochastic models. In NASA Formal Methods Symposium, pp. 295-309. Springer, 2015.
|
| 423 |
+
|
| 424 |
+
Quintía Vidal, P., Iglesias Rodríguez, R., Rodríguez González, M. Á., and Vázquez Regueiro, C. Learning on real robots from experience and simple user feedback. 2013.
|
| 425 |
+
Rajeswaran, A., Kumar, V., Gupta, A., Vezzani, G., Schulman, J., Todorov, E., and Levine, S. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. In Proceedings of Robotics: Science and Systems, 2017.
|
| 426 |
+
Ross, S., Gordon, G., and Bagnell, D. A reduction of imitation learning and structured prediction to no-regret online learning. In Proceedings of the International Conference on Artificial Intelligence and Statistics, pp. 627-635, 2011.
|
| 427 |
+
Schulman, J., Levine, S., Abbeel, P., Jordan, M. I., and Moritz, P. Trust region policy optimization. In Bach, F. R. and Blei, D. M. (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 1889-1897. JMLR.org, 2015.
|
| 428 |
+
Schulman, J., Moritz, P., Levine, S., Jordan, M. I., and Abbeel, P. High-dimensional continuous control using generalized advantage estimation. In Bengio, Y. and LeCun, Y. (eds.), 4th International Conference on Learning Representations, ICLR 2016, San Juan, Puerto Rico, May 2-4, 2016, Conference Track Proceedings, 2016.
|
| 429 |
+
Silver, D., Huang, A., Maddison, C. J., Guez, A., Sifre, L., Van Den Driessche, G., Schrittwieser, J., Antonoglou, I., Panneershelvam, V., Lanctot, M., et al. Mastering the game of go with deep neural networks and tree search. Nature, 529(7587):484, 2016.
|
| 430 |
+
Smart, W. D. and Kaelbling, L. P. Practical reinforcement learning in continuous spaces. In Langley, P. (ed.), Proceedings of the Seventeenth International Conference on Machine Learning (ICML 2000), Stanford University, Stanford, CA, USA, June 29 - July 2, 2000, pp. 903-910. Morgan Kaufmann, 2000.
|
| 431 |
+
Srinivasan, K., Eysenbach, B., Ha, S., Tan, J., and Finn, C. Learning to be safe: Deep rl with a safety critic. arXiv preprint arXiv:2010.14603, 2020.
|
| 432 |
+
Stooke, A., Achiam, J., and Abbeel, P. Responsive safety in reinforcement learning by PID lagrangian methods. In Proceedings of the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 9133-9143. PMLR, 2020.
|
| 433 |
+
|
| 434 |
+
Sui, Y., Gotovos, A., Burdick, J. W., and Krause, A. Safe exploration for optimization with gaussian processes. In Bach, F. R. and Blei, D. M. (eds.), Proceedings of the 32nd International Conference on Machine Learning, ICML 2015, Lille, France, 6-11 July 2015, volume 37 of JMLR Workshop and Conference Proceedings, pp. 997-1005. JMLR.org, 2015.
|
| 435 |
+
Sun, W., Gordon, G. J., Boots, B., and Bagnell, J. A. Dual policy iteration. In Bengio, S., Wallach, H. M., Larochelle, H., Grauman, K., Cesa-Bianchi, N., and Garnett, R. (eds.), Advances in Neural Information Processing Systems 31: Annual Conference on Neural Information Processing Systems 2018, NeurIPS 2018, December 3-8, 2018, Montreal, Canada, pp. 7059-7069, 2018.
|
| 436 |
+
Tessler, C., Mankowitz, D. J., and Mannor, S. Reward constrained policy optimization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net, 2019.
|
| 437 |
+
Thananjeyan, B., Balakrishna, A., Rosolia, U., Li, F., McAllister, R., Gonzalez, J. E., Levine, S., Borrelli, F., and Goldberg, K. Safety augmented value estimation from demonstrations (saved): Safe deep model-based rl for sparse cost robotic tasks. IEEE Robotics and Automation Letters, 5(2):3612-3619, 2020.
|
| 438 |
+
Thananjeyan, B., Balakrishna, A., Nair, S., Luo, M., Srinivasan, K., Hwang, M., Gonzalez, J. E., Ibarz, J., Finn, C., and Goldberg, K. Recovery rl: Safe reinforcement learning with learned recovery zones. IEEE Robotics and Automation Letters, 6(3):4915-4922, 2021.
|
| 439 |
+
Turchetta, M., Berkenkamp, F., and Krause, A. Safe exploration in finite markov decision processes with gaussian processes. In Lee, D. D., Sugiyama, M., von Luxburg, U., Guyon, I., and Garnett, R. (eds.), Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pp. 4305-4313, 2016.
|
| 440 |
+
Vecerik, M., Hester, T., Scholz, J., Wang, F., Pietquin, O., Piot, B., Heess, N., Rothörl, T., Lampe, T., and Riedmiller, M. Leveraging demonstrations for deep reinforcement learning on robotics problems with sparse rewards. arXiv preprint arXiv:1707.08817, 2017.
|
| 441 |
+
Vinitsky, E., Kreidieh, A., Le Flem, L., Kheterpal, N., Jang, K., Wu, C., Wu, F., Liaw, R., Liang, E., and Bayen, A. M. Benchmarks for reinforcement learning in mixed-autonomy traffic. In Proceedings of Conference on Robot Learning, pp. 399-409, 2018.
|
| 442 |
+
Wachi, A. and Sui, Y. Safe reinforcement learning in constrained markov decision processes. In Proceedings of
|
| 443 |
+
|
| 444 |
+
the 37th International Conference on Machine Learning, ICML 2020, 13-18 July 2020, Virtual Event, volume 119 of Proceedings of Machine Learning Research, pp. 9797-9806. PMLR, 2020.
|
| 445 |
+
Wu, Y., Charoenphakdee, N., Bao, H., Tangkaratt, V., and Sugiyama, M. Imitation learning from imperfect demonstration. In Chaudhuri, K. and Salakhutdinov, R. (eds.), Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pp. 6818-6827. PMLR, 2019.
|
| 446 |
+
Yang, T., Rosca, J., Narasimhan, K., and Ramadge, P. J. Projection-based constrained policy optimization. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net, 2020.
|
| 447 |
+
Zhang, J., Cheung, B., Finn, C., Levine, S., and Jayaraman, D. Cautious adaptation for reinforcement learning in safety-critical settings. In International Conference on Machine Learning, pp. 11055-11065. PMLR, 2020.
|
| 448 |
+
Zhang, R., Torabi, F., Guan, L., Ballard, D. H., and Stone, P. Leveraging human guidance for deep reinforcement learning tasks. In Kraus, S. (ed.), Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI 2019, Macao, China, August 10-16, 2019, pp. 6339-6346. ijcai.org, 2019. doi: 10.24963/ijcai.2019/884.
|
| 449 |
+
Zheng, L. and Ratliff, L. J. Constrained upper confidence reinforcement learning. arXiv preprint arXiv:2001.09377, 2020.
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d8d2f174fd157d4f502056da4b970ba5ad5f5b7d45150b00a6f6a5e29a7486f1
|
| 3 |
+
size 600919
|
acceleratingsafereinforcementlearningwithconstraintmismatchedbaselinepolicies/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f898b632cb7cc159c71d8270751c2a8853045e069bfb3b9857c9df7aedddf7bb
|
| 3 |
+
size 665823
|
accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6cd986e7265fb501f0f38c7b3ed42ace558d5f5e627a47a180e8ade9dc7a9388
|
| 3 |
+
size 105639
|
accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fe845aff1fac4059fbcfead0e4dbf16bd73dd0d04bf9ad4b439068acd5020eb3
|
| 3 |
+
size 138823
|
accelerationviafractallearningrateschedules/70131c0f-ec80-4daa-acca-0e567d7e2d43_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:982d08479a8665ad40a2825d0601e7b748a9b11a7e70ba7649c0ac904ea4775a
|
| 3 |
+
size 739225
|
accelerationviafractallearningrateschedules/full.md
ADDED
|
@@ -0,0 +1,595 @@
| 1 |
+
# Acceleration via Fractal Learning Rate Schedules

Naman Agarwal $^{1}$ Surbhi Goel $^{2}$ Cyril Zhang $^{2}$
|
| 2 |
+
|
| 3 |
+
# Abstract
|
| 4 |
+
|
| 5 |
+
In practical applications of iterative first-order optimization, the learning rate schedule remains notoriously difficult to understand and expensive to tune. We demonstrate the presence of these subtleties even in the innocuous case when the objective is a convex quadratic. We reinterpret an iterative algorithm from the numerical analysis literature as what we call the Chebyshev learning rate schedule for accelerating vanilla gradient descent, and show that the problem of mitigating instability leads to a fractal ordering of step sizes. We provide some experiments to challenge conventional beliefs about stable learning rates in deep learning: the fractal schedule enables training to converge with locally unstable updates which make negative progress on the objective.
|
| 6 |
+
|
| 7 |
+
# 1. Introduction
|
| 8 |
+
|
| 9 |
+
In the current era of large-scale machine learning models, a single deep neural network can cost millions of dollars to train. Despite the sensitivity of gradient-based training to the choice of learning rate schedule, no clear consensus has emerged on how to select this high-dimensional hyperparameter, other than expensive end-to-end model training and evaluation. Prior literature indirectly sheds some light on this mystery, showing that the learning rate schedule governs tradeoffs between accelerated convergence and various forms of algorithmic stability.
|
| 10 |
+
|
| 11 |
+
In this work, we highlight the surprising consequences of these tradeoffs in a very simple setting: first-order optimization of a convex quadratic function. We start by pointing out the existence of a non-adaptive step size schedule, derived from the roots of Chebyshev polynomials, which allows plain gradient descent to obtain accelerated convergence rates without momentum. These learning rates overshoot the region of guaranteed local progress, resulting in unstable optimization trajectories.
|
| 12 |
+
|
| 13 |
+

|
| 14 |
+
Figure 1: Visualization of the Chebyshev nodes $\gamma_{t}$ , their corresponding step sizes $\gamma_{t}^{-1}$ , and the fractal permutation (Lebedev & Finogenov, 1971) studied in this paper.
|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
|
| 20 |
+
Extending a relatively obscure line of work motivated by numerical imprecision in PDE solvers (Lebedev & Finogenov, 1971), we show that stable acceleration is achieved by selecting a fractal permutation of the Chebyshev step sizes.
|
| 21 |
+
|
| 22 |
+
Acceleration via large step sizes may provide a useful alternative to momentum: it is less stable according to our worst-case bounds, but inherits the memory-efficiency and statelessness of vanilla gradient descent. More broadly, we discuss how this form of acceleration might implicitly present itself in settings like deep learning, introducing hidden entanglements and experimental confounds. We hope that these ideas will lead to new adaptive algorithms which overstep the "edge of stability" (the largest constant learning rate at which model training converges) (Giladi et al., 2019; Cohen et al., 2021), and accelerate training via carefully scheduled negative progress. We provide some supporting experiments towards bridging the theory-practice gap, as well as open questions for future investigation.
|
| 23 |
+
|
| 24 |
+
# 1.1. Our contributions
|
| 25 |
+
|
| 26 |
+
Provably stable acceleration without momentum. We revisit an oft-neglected variant of the Chebyshev iteration method for accelerating gradient descent on convex quadratics. In lieu of momentum, it uses a recursively-defined sequence of large step sizes derived from Chebyshev polynomials, which we call the fractal Chebyshev schedule. We prove a new stability guarantee for this algorithm: under bounded perturbations to all the gradients, no iterate changes by more than $O(\mathrm{poly}(\kappa))$, where $\kappa$ is the condition number of the problem. We also provide some theoretically-grounded practical variants of the schedule, and negative results for function classes beyond convex quadratics.
|
| 27 |
+
|
| 28 |
+
Empirical insights on stable oscillating schedules. We demonstrate empirically that the fractal Chebyshev schedule stabilizes gradient descent on objectives beyond convex quadratics. We observe accelerated convergence on an instance of multiclass logistic regression, and convergent training of deep neural networks at unstable learning rates. These experiments highlight the power of optimizing the "microstructure" of the learning rate schedule (as opposed to global features like warmup and decay). We discuss how these findings connect to other implicit behaviors of SGD and learning rate schedules.
|
| 29 |
+
|
| 30 |
+
# 1.2. Related work
|
| 31 |
+
|
| 32 |
+
The predominant algorithms for accelerated first-order optimization are the momentum methods of Polyak (1964b) and Nesterov (1983). The former, known as the heavy-ball method, only achieves provable acceleration on quadratic objectives. The latter achieves minimax optimal convergence rates for general smooth convex objectives. Both are widely used in practice, far beyond their theoretical scope; for instance, they are the standard options available in deep learning frameworks.
|
| 33 |
+
|
| 34 |
+
Empirical challenges and tradeoffs. Bottou & Bousquet (2007) discuss the competing objectives of stability, acceleration, and computation in large-scale settings, where one cannot afford to consider a single asymptotically dominant term. Devolder et al. (2014); Chen et al. (2018); Agarwal et al. (2020b) study this specifically for acceleration. Optimizing the learning rate schedule remains a ubiquitous challenge; see Section 6.2 and Appendix G.2 for references.
|
| 35 |
+
|
| 36 |
+
Numerical methods and extremal polynomials. There are many connections between algorithm design and approximation theory (Vishnoi, 2012; Sachdeva & Vishnoi, 2013). We emphasize that the beautiful idea of the fractal permutation of Chebyshev nodes is an innovation by Lebedev & Finogenov (1971; 1973; 1976); our technical results are generalizations and refinements of the ideas therein. We give an overview of this line of work in Appendix G.1.
|
| 37 |
+
|
| 38 |
+
Learning rate schedules in stochastic optimization. Bias-variance tradeoffs in optimization are studied in various theoretical settings, including quadratics with additive and multiplicative noise (Lan, 2012; Ge et al., 2019; Gorbunov et al., 2020). Many of them also arrive at theoretically principled learning rate schedules; see Appendix G.3. On the more empirical side, Zhang et al. (2019) use a noisy quadratic model to make coarse predictions about the dynamics of large-scale neural net training. Cyclic learning rate schedules have been employed in deep learning, with various heuristic justifications (Loshchilov & Hutter, 2016; Smith, 2017; Fu et al., 2019).
|
| 39 |
+
|
| 40 |
+
In parallel work, Oymak (2021) considers a cyclic "1 high, $n$ low" schedule, which gives $\log(\kappa)$ convergence rates in the special case of convex quadratics whose Hessians have bimodal spectra. We discuss in Appendix E.5 why this approach does not provide acceleration in the general case; the MNIST experiments in Appendix F.4 include a comparison with this schedule.
|
| 41 |
+
|
| 42 |
+
# 2. Preliminaries
|
| 43 |
+
|
| 44 |
+
# 2.1. Gradient descent
|
| 45 |
+
|
| 46 |
+
We consider the problem of iterative optimization of a differentiable function $f: \mathbb{R}^d \to \mathbb{R}$ , with a first-order oracle $\nabla f: \mathbb{R}^d \to \mathbb{R}^d$ which computes the gradient of $f$ at a query point. The simplest algorithm in this setting is gradient descent, which takes an arbitrary initial iterate $x_1 \in \mathbb{R}^d$ and executes $T$ update steps
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
\left\{x _ {t + 1} \leftarrow x _ {t} - \eta_ {t} \nabla f (x _ {t}) \right\} _ {t = 1} ^ {T} \tag {1}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
according to a learning rate schedule $(\eta_{1},\ldots ,\eta_{T})$ , producing a final iterate $x_{\mathrm{out}}\coloneqq x_{T + 1}$ . When the $\{\eta_t\}$ do not depend on $T$ , an analogous infinite sequence of iterates $\{x_{t}\}_{t\in \mathbb{N}}$ can be defined.
|
| 53 |
+
|
| 54 |
+
There are many ways to choose the learning rate schedule, depending on the structure of $f$ and uncertainty in the gradient oracle. Some schedules are static (non-adaptive): $(\eta_{1},\dots ,\eta_{T})$ are chosen before the execution of the algorithm. For instance, when $f$ is an $M$ -smooth convex function, $\eta_t = 1 / M$ achieves the classical convergence rates.
|
| 55 |
+
|
| 56 |
+
Adaptive choices of $\eta_t$ are allowed to depend on the observed feedback from the current execution (including $x_t$ and $\nabla f(x_t)$ ), and are considerably more expressive. For example, $\eta_t$ can be chosen adaptively via line search, adaptive regularization, or curvature estimation.
|
| 57 |
+
|
| 58 |
+
# 2.2. The special case of quadratics
|
| 59 |
+
|
| 60 |
+
Consider the case where the objective is of the form
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
f (x) = \frac {1}{2} x ^ {\top} A x - b ^ {\top} x,
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $A \in \mathbb{R}^{d \times d}$ is symmetric and positive definite, and $b \in \mathbb{R}^d$ , so that $\nabla f(x) = Ax - b$ is an affine function of the query point $x$ . Then, the mapping $\mathcal{G}: x_t \mapsto x_{t+1}$ induced by gradient descent is also affine. Let $x^* := \min f$ (a fixed point of $\mathcal{G}$ ). Then,
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\begin{array}{l} x _ {t + 1} - x ^ {*} = \mathcal {G} (x _ {t}) - x ^ {*} = \mathcal {G} (x _ {t}) - \mathcal {G} (x ^ {*}) \\ = (I - \eta_ {t} A) \left(x _ {t} - x ^ {*}\right). \\ \end{array}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
By induction, we conclude that
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
x _ {\mathrm {o u t}} - x ^ {*} = \left[ \prod_ {t = 1} ^ {T} (I - \eta_ {t} A) \right] (x _ {1} - x ^ {*}).
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
Thus, the residual after $T$ steps of gradient descent is given by a degree- $T$ matrix polynomial times the initial residual:
|
| 79 |
+
|
| 80 |
+
Definition 1 (Residual polynomial). Fix a choice of non-adaptive $(\eta_1,\dots ,\eta_T)$ . Then, define the residual polynomial $p:\mathbb{R}^{d\times d}\to \mathbb{R}^{d\times d}$ as
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
p (A) := \prod_ {t = 1} ^ {T} (I - \eta_ {t} A).
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
When clear from context, we use the same symbol to denote scalar and matrix polynomials with the same coefficients. Thus, overloading $p: \mathbb{R} \to \mathbb{R}$, we have $p(0) = 1$, and $p(1 / \eta_t) = 0$ for each $t$.
|
| 87 |
+
|
| 88 |
+
Remark 2. The matrices in the above product all commute. Thus, when $f$ is quadratic, $p(A)$ (and thus $x_{\mathrm{out}}$ given $x_{1}$ ) does not depend on the permutation of $(\eta_1, \ldots, \eta_T)$ .
|
| 89 |
+
|
| 90 |
+
# 2.3. Chebyshev polynomials and Chebyshev methods
|
| 91 |
+
|
| 92 |
+
The problem of choosing $p(A)$ to optimize convergence for least-squares has roots in numerical methods for differential equations (Richardson, 1911). The Chebyshev polynomials, which appear ubiquitously in numerical methods and approximation theory (Chebyshev, 1853; Mason & Handscomb, 2002), provide a minimax-optimal solution (Flanders & Shortley, 1950; Gavurin, 1950; Young, 1953)<sup>1</sup>: choose positive real numbers $m \leq M$ , and set
|
| 93 |
+
|
| 94 |
+
$$
|
| 95 |
+
p (\lambda) = \frac {\mathcal {T} _ {T} (z)}{\mathcal {T} _ {T} (\theta)},
|
| 96 |
+
$$
|
| 97 |
+
|
| 98 |
+
where $z \coloneqq \frac{M + m - 2\lambda}{M - m}$, $\theta \coloneqq \frac{M + m}{M - m} = 1 + \frac{2m}{M - m}$, and $\mathcal{T}_{n}(\cdot)$ is the degree-$n$ Chebyshev polynomial of the first kind. One of many equivalent definitions is $\mathcal{T}_{n}(z) = \cos (n \arccos z)$ for $|z| \leq 1$. From this definition it follows that the roots of $p$ occur at the Chebyshev nodes
|
| 99 |
+
|
| 100 |
+
$$
|
| 101 |
+
\gamma_ {t} := \frac {M + m}{2} - \frac {M - m}{2} \cos \frac {(t - \frac {1}{2}) \pi}{T}, t = 1, \ldots , T.
|
| 102 |
+
$$
|
| 103 |
+
|
| 104 |
+
Setting $\{\eta_t\}$ to be any permutation of $\{1 / \gamma_t\}$ suffices to realize this choice of $p$ . Note that $1 / \gamma_{t}$ is decreasing in $t$ . The limiting case $m = M$ is gradient descent with a constant learning rate, and $p(\lambda) = (1 - \lambda /m)^T$ .
|
| 105 |
+
|
| 106 |
+
Let $\lambda_{\min}, \lambda_{\max}$ denote the smallest and largest eigenvalues of $A$ , so that the condition number of $A$ is $\kappa := \lambda_{\max} / \lambda_{\min}$ . Viewing $m, M$ as estimates for the spectrum, we define
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
\widehat{\kappa} := \frac{M}{m} \geq \frac{\lambda_{\max}}{\lambda_{\min}} = \kappa.
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
We state a classic end-to-end convergence rate for Chebyshev iteration (proven in Appendix B for completeness):
|
| 113 |
+
|
| 114 |
+
Theorem 3 (Convergence rate of Chebyshev iteration). Choose spectral estimates $m \leq M$ such that $0 < m \leq \lambda_{\min} \leq \lambda_{\max} \leq M$ . Then, setting $\{\eta_t\}$ to be any permutation of $\{1 / \gamma_t\}$ , the final iterate of gradient descent $x_{\mathrm{out}}$ satisfies the following:
|
| 115 |
+
|
| 116 |
+
$$
|
| 117 |
+
\begin{array}{l} \left\| x_{\mathrm{out}} - x^{*} \right\| \leq \frac{2 \rho^{T}}{1 + \rho^{2T}} \left\| x_{1} - x^{*} \right\| \\ \qquad \leq e^{-\Omega(T) / \sqrt{\widehat{\kappa}}} \left\| x_{1} - x^{*} \right\|, \end{array}
|
| 118 |
+
$$
|
| 119 |
+
|
| 120 |
+
where $\rho := \frac{\sqrt{M} - \sqrt{m}}{\sqrt{M} + \sqrt{m}} \leq 1 - \Omega\left(\frac{1}{\sqrt{\hat{\kappa}}}\right)$ .
|
| 121 |
+
|
| 122 |
+
Thus, accelerated methods like Chebyshev iteration get $\varepsilon$ -close to the minimizer in $O(\sqrt{\hat{\kappa}}\log(1/\varepsilon))$ iterations, a quadratic improvement over the $O(\hat{\kappa}\log(1/\varepsilon))$ rate of gradient descent with a constant learning rate. Theorem 3 is proven using approximation theory: show that $|p(\lambda)|$ is small on an interval containing the spectrum of $A$ .
|
| 123 |
+
|
| 124 |
+
Definition 4 (Uniform norm on an interval). Let $p: \mathbb{R} \to \mathbb{R}$ and $m \leq M \in \mathbb{R}$ . Define the norm
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\| p \| _ {[ m, M ]} := \| p \| _ {L _ {\infty} ([ m, M ])} = \max _ {\lambda \in [ m, M ]} | p (\lambda) |.
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
Then, any upper bound on this norm gives rise to a convergence rate like Theorem 3:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\left\| x_{\mathrm{out}} - x^{*} \right\| \leq \left\| p \right\|_{[m, M]} \cdot \left\| x_{1} - x^{*} \right\|.
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
These can be converted into optimality gaps on $f$ by considering the polynomial $\lambda p^2(\lambda)$ .
|
| 137 |
+
|
| 138 |
+
Moving beyond infinite-precision arithmetic, the optimization literature typically takes the route of Stiefel (1958), establishing a higher-order recurrence which "semi-iteratively" (iteratively, but keeping some auxiliary state) constructs the same final polynomial $p$ . This is the usual meaning of the Chebyshev iteration method, and coincides with Polyak's momentum on quadratics.
|
| 139 |
+
|
| 140 |
+
This is where we depart from the conventional approach.<sup>2</sup> We revisit the idea of working directly with the Chebyshev step sizes, giving a different class of algorithms with different trajectories and stability properties.
|
| 141 |
+
|
| 142 |
+
# 3. The fractal Chebyshev schedule
|
| 143 |
+
|
| 144 |
+
In this section, we work in the strongly<sup>3</sup> convex quadratic setting from Section 2.2. Our new contributions on top of the existing theory address the following questions:
|
| 145 |
+
|
| 146 |
+

|
| 147 |
+
Figure 2: Shapes of the Chebyshev step sizes and fractal permutations. Left: Step sizes in sorted order for $M = 1$ , and $m = 1, \frac{1}{2}, \ldots, \frac{1}{20}$ (black to blue). Right: Permuted schedule with $M = 1$ , $m = \frac{1}{20}$ , $T = 128$ (red). Subsequences with strides $\{1, 4, 16, 64\}$ are overlaid, demonstrating self-similarity arising from the interlacing construction.
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
|
| 151 |
+
(1) How noise-tolerant is gradient descent with Chebyshev learning rates, beyond numerical imprecision?
|
| 152 |
+
(2) How do we choose the ordering of steps?
|
| 153 |
+
|
| 154 |
+
We first introduce the construction originally motivated by numerical error, which provides an initial answer to (2). Then, our extended robustness analysis provides an answer to (1), and subsequently a more refined answer to (2).
|
| 155 |
+
|
| 156 |
+
# 3.1. Construction
|
| 157 |
+
|
| 158 |
+
We begin with the construction from (Lebedev & Finogenov, 1971), defined below and visualized in Figure 2.
|
| 159 |
+
|
| 160 |
+
Definition 5 (Fractal Chebyshev schedule). Let $\sigma_{1} := [1]$ and for each $T \geq 1$ a power of 2, define
|
| 161 |
+
|
| 162 |
+
$$
|
| 163 |
+
\sigma_{2T} := \operatorname{interlace}\left(\sigma_{T},\, 2T + 1 - \sigma_{T}\right),
|
| 164 |
+
$$
|
| 165 |
+
|
| 166 |
+
where
|
| 167 |
+
|
| 168 |
+
$$
\operatorname{interlace}([a_{1} \ldots a_{n}], [b_{1} \ldots b_{n}]) := [a_{1}\, b_{1}\, a_{2}\, b_{2} \ldots a_{n}\, b_{n}].
$$
|
| 169 |
+
|
| 170 |
+
Then, for given $m \leq M$ , and $T$ a power of 2, the fractal Chebyshev schedule is the sequence of learning rates
|
| 171 |
+
|
| 172 |
+
$$
|
| 173 |
+
\eta_ {t} := 1 / \gamma_ {\sigma_ {T} (t)}, \quad t = 1, \dots , T.
|
| 174 |
+
$$
|
| 175 |
+
|
| 176 |
+
Below are the first few nontrivial permutations $\sigma_T$ :
|
| 177 |
+
|
| 178 |
+
$$
|
| 179 |
+
\sigma_{2} = [1,\, 2],
|
| 180 |
+
$$
|
| 181 |
+
|
| 182 |
+
$$
|
| 183 |
+
\sigma_{4} = [1,\, 4,\, 2,\, 3],
|
| 184 |
+
$$
|
| 185 |
+
|
| 186 |
+
$$
|
| 187 |
+
\sigma_{8} = [1,\, 8,\, 4,\, 5,\, 2,\, 7,\, 3,\, 6],
|
| 188 |
+
$$
|
| 189 |
+
|
| 190 |
+
$$
|
| 191 |
+
\sigma_{16} = [1,\, 16,\, 8,\, 9,\, 4,\, 13,\, 5,\, 12,\, 2,\, 15,\, 7,\, 10,\, 3,\, 14,\, 6,\, 11].
|
| 192 |
+
$$
|
| 193 |
+
|
| 194 |
+
# 3.2. Basic properties
|
| 195 |
+
|
| 196 |
+
We first list some basic facts about the unordered step sizes:
|
| 197 |
+
|
| 198 |
+
Proposition 6. For all $m < M$ and $T$ , the fractal Chebyshev step sizes $\{\gamma_t^{-1}\}$ satisfy the following:
|
| 199 |
+
|
| 200 |
+
(i) $\frac{1}{M} < \gamma_t^{-1} < \frac{1}{m} = \frac{\widehat{\kappa}}{M}.$
|
| 201 |
+
|
| 202 |
+
(ii) The number of step sizes greater than $\frac{2}{M}$ is $\left(\frac{1}{2} - \varepsilon\right)T$, where $0 \leq \varepsilon \leq O(1 / \widehat{\kappa})$ as $\widehat{\kappa} \rightarrow \infty$.
|
| 203 |
+
|
| 204 |
+
(iii) For $t \leq \frac{T}{2}$ , we have $\gamma_t^{-1} < \frac{1}{m + \frac{2(M - m)t^2}{T^2}}$ , and
|
| 205 |
+
|
| 206 |
+
$$
|
| 207 |
+
\frac{1}{T} \sum_{t = 1}^{T} \gamma_{t}^{-1} = \frac{\tanh\left(T \operatorname{acosh}\left(1 + \frac{2m}{M - m}\right)\right)}{\sqrt{Mm}} < \frac{1}{\sqrt{Mm}} = \frac{\sqrt{\widehat{\kappa}}}{M}.
|
| 208 |
+
$$
|
| 209 |
+
|
| 210 |
+
Interpreting $m, M$ as estimates for $\lambda_{\min}, \lambda_{\max}$:
|
| 211 |
+
|
| 212 |
+
(i) Every step size in the schedule exceeds the classic fixed learning rate of $1 / \lambda_{\mathrm{max}}$ . As $T$ gets large, the largest step approaches $1 / \lambda_{\mathrm{min}}$ , a factor of $\kappa$ larger.
|
| 213 |
+
(ii) For large $\kappa$ , close to half of the step sizes overshoot the stable regime $\eta \in [0,2 / \lambda_{\mathrm{max}}]$ , where local progress on $f$ is guaranteed.
|
| 214 |
+
(iii) The large steps are neither highly clustered nor dispersed. The largest $\gamma_t^{-1}$ overshoots the stable regime by a factor of $\Theta(\kappa)$ , but the average factor is only $O(\sqrt{\kappa})$ .
|
| 215 |
+
|
| 216 |
+
Next, some basic observations about the fractal schedule:
|
| 217 |
+
|
| 218 |
+
Proposition 7 (Hierarchy and self-similarity). For all $m, M, T$ and $0 \leq i \leq \log_2 T$ :
|
| 219 |
+
|
| 220 |
+
(i) The largest $\frac{T}{2^i}$ steps $\eta_t$ in the fractal Chebyshev schedule occur when $t = 1 + 2^i (\tau - 1)$ , with $\tau = 1, \ldots, \frac{T}{2^i}$ .
|
| 221 |
+
(ii) The subsampled sequence $\{\eta_{1 + 2^i (\tau -1)}\}$ has the same ordering as the fractal permutation of the same length:
|
| 222 |
+
|
| 223 |
+
$$
|
| 224 |
+
\eta_{1 + 2^{i}(\tau - 1)} = \gamma_{1 + 2^{i}(\tau' - 1)}^{-1}, \quad \text{where } \tau' = \sigma_{T / 2^{i}}(\tau).
|
| 225 |
+
$$
|
| 226 |
+
|
| 227 |
+
Figure 2 visualizes these observations, while Appendix D.1 contains formal statements and proofs.
|
| 228 |
+
|
| 229 |
+
# 3.3. Self-stabilization via infix polynomial bounds
|
| 230 |
+
|
| 231 |
+
Now, let us examine why the fractal ordering is needed. As discussed, in the noiseless infinite-precision setting, the final iterate $x_{\mathrm{out}}$ is invariant to the permutation of $\{\eta_t\}$. However, the intermediate iterates $x_{t}$ depend on a sequence of partial products, which depend very sensitively on the permutation. Figure 3 illustrates these tradeoffs; details are in Appendix F.1.
|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
Figure 3: The optimization trajectories of various permutations of the Chebyshev step sizes. Left: In the noiseless case, the final iterates coincide, but $x_{t}$ can wander exponentially far away. Right: With (i.i.d. Gaussian) noise, there is a tradeoff between $\| x_{t}\|$ and the stability of $x_{\mathrm{out}}$ .
|
| 235 |
+
|
| 236 |
+
We motivate our first new results using an additive noise model; this is a refinement of (Lebedev & Finogenov, 1971; 1973; 1976), which are only concerned with preventing exponential blowup of negligible perturbations at the numerical noise floor. We consider adding a sequence of perturbations $(\xi_1,\dots ,\xi_T)$ to gradient descent (Equation 1):
|
| 237 |
+
|
| 238 |
+
$$
|
| 239 |
+
\left\{x _ {t + 1} \leftarrow x _ {t} - \eta_ {t} \nabla f \left(x _ {t}\right) + \xi_ {t} \right\} _ {t = 1} ^ {T}. \tag {2}
|
| 240 |
+
$$
|
| 241 |
+
|
| 242 |
+
Note that this captures an inexact (e.g. stochastic) gradient oracle $\widetilde{\nabla} f(\cdot)$ , in which case
|
| 243 |
+
|
| 244 |
+
$$
|
| 245 |
+
\xi_ {t} = \eta_ {t} \left(\nabla f \left(x _ {t}\right) - \widetilde {\nabla f} \left(x _ {t}\right)\right). \tag {3}
|
| 246 |
+
$$
|
| 247 |
+
|
| 248 |
+
Unrolling the recursion, we get:
|
| 249 |
+
|
| 250 |
+
$$
|
| 251 |
+
x _ {2} - x ^ {*} = (I - \eta_ {1} A) \left(x _ {1} - x ^ {*}\right) + \xi_ {1},
|
| 252 |
+
$$
|
| 253 |
+
|
| 254 |
+
$$
|
| 255 |
+
x _ {3} - x ^ {*} = (I - \eta_ {2} A) [ (I - \eta_ {1} A) (x _ {1} - x ^ {*}) + \xi_ {1} ] + \xi_ {2},
|
| 256 |
+
$$
|
| 257 |
+
|
| 258 |
+
···
|
| 259 |
+
|
| 260 |
+
$$
|
| 261 |
+
x _ {t} - x ^ {*} = p _ {1: t - 1} (A) \left(x _ {1} - x ^ {*}\right) + \sum_ {t ^ {\prime} = 2} ^ {t} p _ {t ^ {\prime}: t - 1} (A) \xi_ {t ^ {\prime} - 1},
|
| 262 |
+
$$
|
| 263 |
+
|
| 264 |
+
where we have defined the infix polynomial as the (possibly empty) product
|
| 265 |
+
|
| 266 |
+
$$
|
| 267 |
+
p _ {s: t} (A) := \prod_ {\tau = s} ^ {t} (I - \eta_ {\tau} A).
|
| 268 |
+
$$
|
| 269 |
+
|
| 270 |
+
Lebedev & Finogenov (1971) give bounds on the norms of the prefix polynomials $p_{1:t}$ and suffix polynomials $p_{s:T}$ :
|
| 271 |
+
|
| 272 |
+
Theorem 8 (Prefix and suffix bounds). For a fractal Chebyshev schedule with $m, M, T$ , and all $1 \leq s \leq t \leq T$ :
|
| 273 |
+
|
| 274 |
+
(i) $\| p_{1:t}\|_{[m,M]} \leq \frac{\widehat{\kappa} - 1}{4^{\min(\mathrm{bits}(t))}} \prod_{j \in \mathrm{bits}'(t)} \frac{2}{1 + \mathcal{T}_{2^{j}}(\theta)};$
|
| 275 |
+
(ii) $\| p_{s:T}\|_{[m,M]}\leq \prod_{j\in \mathrm{bits}(T + 1 - s)}\frac{2}{1 + \mathcal{T}_{2^j}(\theta)},$
|
| 276 |
+
|
| 277 |
+
where $\mathrm{bits}(n)$ denotes the sequence $j_{1} > j_{2} > \ldots > j_{k}$ of indices in the binary expansion of $n$, and $\mathrm{bits}'(n) \coloneqq \mathrm{bits}(n) \setminus \{j_{k}\}$.
|
| 278 |
+
|
| 279 |
+
For example, when $n = 6 = 2^{2} + 2^{1}$, $\mathrm{bits}(n) = \{2, 1\}$ and $\mathrm{bits}'(n) = \{2\}$.
|
| 280 |
+
|
| 281 |
+
Let $\mathcal{V}(\cdot), \mathcal{V}'(\cdot)$ denote the bounds from Theorem 8, so that $\| p_{1:t}\|_{[m,M]} \leq \mathcal{V}'(t)$ and $\| p_{s:T}\|_{[m,M]} \leq \mathcal{V}(T + 1 - s)$.
|
| 282 |
+
|
| 283 |
+
Notice that $\mathcal{V}(t) \leq \frac{2}{1 + \mathcal{T}_{\lfloor t/2 \rfloor}(\theta)} \leq e^{-\Omega(t)/\sqrt{\widehat{\kappa}}}$ for all $t \geq 1$, and $\mathcal{V}'(t) \leq \widehat{\kappa}\, \mathcal{V}(t)$.
|
| 284 |
+
|
| 285 |
+
To fully understand the propagation of $\xi_{t}$ through Equation 2, we provide bounds on the infix polynomial norms:
|
| 286 |
+
|
| 287 |
+
Theorem 9 (Infix polynomial bounds). For the fractal Chebyshev schedule with $m, M, T$ , and all $1 \leq s \leq t \leq T$ :
|
| 288 |
+
|
| 289 |
+
$$
|
| 290 |
+
\left\| p _ {s: t} \right\| _ {[ m, M ]} \leq \mathcal {V} (\zeta + 1 - s) \cdot \mathcal {V} ^ {\prime} (t - \zeta),
|
| 291 |
+
$$
|
| 292 |
+
|
| 293 |
+
where $\zeta$ is the index such that $s - 1\leq \zeta \leq t$ and $\zeta ,\zeta +1$ differ at the most significant bit.
|
| 294 |
+
|
| 295 |
+
Then, analyzing the decay of $\mathcal{V},\mathcal{V}^{\prime}$ , we derive cumulative error bounds:
|
| 296 |
+
|
| 297 |
+
Theorem 10 (Infix series bounds). For a fractal Chebyshev schedule with $m, M, T$ , and all $1 \leq s \leq t \leq T$ :
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\sum_{t' = s}^{t} \| p_{t':t} \|_{[m, M]} \leq O\left(\widehat{\kappa}^{1 + \frac{1}{\ln 4}} \log \widehat{\kappa}\right) = o\left(\widehat{\kappa}^{1.73}\right).
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
This bound, a sum of up to $T$ terms, is independent of $T$ .
|
| 304 |
+
|
| 305 |
+
These require generalizations of the combinatorial proofs for Theorem 8, presented (along with more precise statements) in Appendices D.2 and D.3.
|
| 306 |
+
|
| 307 |
+
# 3.4. Implications for gradient descent
|
| 308 |
+
|
| 309 |
+
Theorem 10 translates to the following end-to-end statement about gradient descent with the fractal schedule:
|
| 310 |
+
|
| 311 |
+
Corollary 11. Suppose $0 < m \leq \lambda_{\min} \leq \lambda_{\max} \leq M$ . Then, gradient descent with the fractal Chebyshev schedule of length $T$ , and perturbations (as in Equation 2) such that $\|\xi_t\| \leq \varepsilon$ , outputs iterates $x_t$ satisfying
|
| 312 |
+
|
| 313 |
+
$$
|
| 314 |
+
\left\| x_{t + 1} - x^{*} \right\| \leq \left\| p_{1:t} \right\|_{[m, M]} \cdot \left\| x_{1} - x^{*} \right\| + o\left(\widehat{\kappa}^{1.73}\right) \cdot \varepsilon.
|
| 315 |
+
$$
|
| 316 |
+
|
| 317 |
+
Recall that Theorems 8 and 3 guarantee
|
| 318 |
+
|
| 319 |
+
$$
|
| 320 |
+
\begin{array}{l} \left\| p _ {1: t} \right\| _ {[ m, M ]} \leq e ^ {- \Omega (T) \cdot \log (\hat {\kappa}) / \sqrt {\hat {\kappa}}} \\ \left\| p _ {1: T} \right\| _ {[ m, M ]} \leq e ^ {- \Omega (T) / \sqrt {\hat {\kappa}}}. \\ \end{array}
|
| 321 |
+
$$
|
| 322 |
+
|
| 323 |
+
The fractal schedule allows the stability factor to be independent of $T$ . When the perturbations arise from noisy gradients (as in Equation 3), so that each $\xi_{t}$ is $\eta_t\varepsilon$ -bounded, this factor becomes $o(\hat{\kappa}^{2.73})$ .
|
| 324 |
+
|
| 325 |
+
Provable benefit of negative progress. A striking fact about the fractal Chebyshev schedule is that this non-adaptive method provably beats the minimax convergence rate of line search, the most fundamental adaptive algorithm in this setting (Boyd & Vandenberghe, 2004):
|
| 326 |
+
|
| 327 |
+
$$
|
| 328 |
+
\eta_ {t} ^ {\left(\mathrm {l s}\right)} := \underset {\eta \geq 0} {\arg \min } f \left(x _ {t} - \eta \nabla f \left(x _ {t}\right)\right). \tag {4}
|
| 329 |
+
$$
|
| 330 |
+
|
| 331 |
+
Proposition 12 (No acceleration from line search). On a strongly convex quadratic objective $f(x) = \frac{1}{2} x^{\top}Ax + b^{\top}x$ , let $\{x_{t}\}$ be the sequence of iterates of gradient descent with the adaptive learning rate schedule $\eta_t^{(\mathrm{ls})}$ from Equation 4. Then, for each $A, b$ , there exists a setting of $x_{1}$ such that
|
| 332 |
+
|
| 333 |
+
$$
|
| 334 |
+
\left\| x_{t + 1} - x^{*} \right\| \geq \left(1 - \frac{1}{\Omega(\kappa)}\right)^{t} \left\| x_{1} - x^{*} \right\|, \quad \forall t \geq 1.
|
| 335 |
+
$$
|
| 336 |
+
|
| 337 |
+
This is a classic fact; for a complete treatment, see Section 3.2.2 of (Kelley, 1999). In the context of our results, it shows that greedily selecting the locally optimal learning rates is provably suboptimal, even compared to a feedback-independent policy.
|
| 338 |
+
|
| 339 |
+
Adaptive estimation of the local loss curvature is an oft-attempted approach, amounting to finding the best conservative step size $\frac{1}{M}$ . Proposition 12 suggests that although such methods have numerous advantages, greedy local methods can miss out on acceleration. The fact that acceleration can be obtained from carefully scheduled overshooting is reminiscent of simulated annealing (Aarts & Korst, 1989), though we could not find any rigorous connections.
|
| 340 |
+
|
| 341 |
+
Comparison with momentum. We stress that this form of acceleration does not replace or dominate momentum. The dependence of the stability term on $\widehat{\kappa}$ is suboptimal (Devolder et al., 2014). In exchange, we get a memoryless acceleration algorithm: gradient descent has no auxiliary variables or multi-term recurrences, so that $x_{t}$ fully specifies the state. This bypasses the subtleties inherent in restarting stateful optimizers (O'Donoghue & Candes, 2015; Loshchilov & Hutter, 2016).
|
| 342 |
+
|
| 343 |
+
Finally, our theory (especially Theorem 14) implies that experiments attempting to probe the acceleration benefits of momentum might be confounded by the learning rate schedule, even in the simplest of settings (thus, certainly also in more complicated settings, like deep learning).
|
| 344 |
+
|
| 345 |
+
# 3.5. Brief overview of proof ideas
|
| 346 |
+
|
| 347 |
+
Figure 3 suggests that there is a tradeoff between taking large $\Omega(1/m)$ steps for acceleration vs. small $O(1/M)$ steps for stability. To get acceleration, we must take all of the large steps in the schedule. However, we must space them out: taking $k = o(T)$ of the largest steps consecutively incurs an exponential blowup in the infix polynomial:
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
$$
|
| 352 |
+
\prod_ {i = 1} ^ {k} \left\| \left(1 - \frac {\lambda}{\gamma_ {i}}\right) \right\| _ {[ m, M ]} \approx \left\| \left(1 - \frac {\lambda}{m}\right) ^ {k} \right\| _ {[ m, M ]} = (\widehat {\kappa} - 1) ^ {k}.
|
| 353 |
+
$$
|
| 354 |
+
|
| 355 |
+
The difficulty arises from the fact that there are not enough small steps in the schedule, so that a large step will need to be stabilized by internal copies of Chebyshev iteration. This is why the fractal schedule is necessary. Theorem 9 shows that this is surprisingly possible: the fractal schedule is only as unstable as the largest single step.
|
| 356 |
+
|
| 357 |
+
This intuition does not get us very far towards an actual proof: the internal copies of Chebyshev iteration, which form a complete binary tree, are "skewed" in a way that is sometimes better, sometimes worse. Isolating a combinatorial tree exchange lemma used to prove Theorem 8, we can iteratively swap two special infix polynomials with two others, and localize "bad skewness" to only one large step. Theorem 9 follows from decomposing each infix into two infixes amenable to the tree exchange procedure. Theorem 10 follows by combining Theorem 9 with sharpened generalizations of the original paper's series bounds.
|
| 358 |
+
|
| 359 |
+
The proofs involve delicate trigonometric inequalities and various interesting facts about the geometry of polynomials. Appendices B, C, and D build up to self-contained proofs.
|
| 360 |
+
|
| 361 |
+
# 4. Extensions and variants
|
| 362 |
+
|
| 363 |
+
Next, we explore some theoretically justified variants.
|
| 364 |
+
|
| 365 |
+
# 4.1. Useful transformations of the fractal schedule
|
| 366 |
+
|
| 367 |
+
Reversing the schedule. Notice that the first step $\eta_{1}$ is the largest step in the schedule. This might not be desirable when $\xi_{t}$ is proportional to $\| x - x^{*}\|$ (like in linear regression with minibatch SGD noise). It is a simple consequence of the symmetries in the main theorems that reversing the fractal Chebyshev schedule produces a contractive variant:
|
| 368 |
+
|
| 369 |
+
Proposition 13. Suppose we run gradient descent with the reversed fractal Chebyshev schedule $\sigma_T(T + 1 - t)$ . Then:
|
| 370 |
+
|
| 371 |
+
(i) For any $1 \leq t < t' \leq T$ , we have
|
| 372 |
+
|
| 373 |
+
$$
|
| 374 |
+
\overline{\| p_{1:t} \|}_{[m, M]} \leq \overline{\| p_{1:t'} \|}_{[m, M]} \leq 1,
|
| 375 |
+
$$
|
| 376 |
+
|
| 377 |
+
where $\overline{\|\cdot\|}$ denotes the corresponding suffix norm bound from Theorem 8 (ii).
|
| 378 |
+
|
| 379 |
+
(ii) The bounds from Theorem 8 are swapped: replace $(p_{1:t}, p_{s:T}) \to (p_{T+1-t:T}, p_{1:T+1-s})$ .
|
| 380 |
+
|
| 381 |
+
(iii) Theorem 9 holds, swapping $\mathcal{V} \leftrightarrow \mathcal{V}'$ . Theorem 10 holds.
|
| 382 |
+
|
| 383 |
+
Concatenating schedules. One can also repeat the fractal Chebyshev schedule indefinitely.<sup>4</sup> Note that each infix polynomial of a repeated schedule can be written as a product of one prefix $p_{1:t}$ , one suffix $p_{s:T}$ , and a power of $p_{1:T}$ , so stability bounds analogous to Theorems 9 and 10 follow straightforwardly. It is also possible to concatenate schedules with different lengths $T$ . Choosing $T$ to be successive powers of 2, one obtains an infinitely long schedule suitable for unknown time horizons.
|
| 384 |
+
|
| 385 |
+
# 4.2. Conservative overstepping and partial acceleration
|
| 386 |
+
|
| 387 |
+
In this section, we decouple the eigenvalue range $[\lambda_{\mathrm{min}},\lambda_{\mathrm{max}}]$ from the Chebyshev node range $[m,M]$ used in constructing the schedule. This can simply arise from an incorrect estimation of the eigenvalue range. However, more interestingly, if we think of $[m,M]$ as purposefully omitting the lower spectrum of $A$ (and thus taking smaller large steps), this allows us to interpolate between the fractal Chebyshev schedule and the vanilla constant learning rate.
|
| 388 |
+
|
| 389 |
+
Easy cases. If $m < \lambda_{\min}$ or $M > \lambda_{\max}$ , then $[m, M]$ is still an interval containing the spectrum of $A$ ; it is simply the case that convergence rates and stability bounds will depend on a worse $\widehat{\kappa} > \kappa$ . On the other hand, if $M < \lambda_{\max}$ , the residual blows up exponentially.
|
| 390 |
+
|
| 391 |
+
The subtle case is when $m > \lambda_{\mathrm{min}}$ , when we are overstepping with restraint, trading off acceleration for stability via more conservative step sizes. This requires us to reason about $\| p\|_{[\lambda_{\mathrm{min}},M]}$ when $p$ was constructed to shrink $\| p\|_{[m,M]}$ . Analyzing this case, we get partial acceleration:
|
| 392 |
+
|
| 393 |
+
Theorem 14. Given a quadratic objective with matrix $A$ and $0 < \lambda_{\min} \leq m \leq \lambda_{\max} \leq M$ , gradient descent with the Chebyshev step sizes results in the following convergence guarantee:
|
| 394 |
+
|
| 395 |
+
$$
|
| 396 |
+
\left\| x_{\mathrm{out}} - x^{*} \right\| \leq 2 \left(1 - \phi^{-1}\left(\lambda_{\min}, m, M\right)\right)^{T} \cdot \left\| x_{1} - x^{*} \right\|,
|
| 397 |
+
$$
|
| 398 |
+
|
| 399 |
+
with
|
| 400 |
+
|
| 401 |
+
$$
|
| 402 |
+
\phi^{-1}(\lambda_{\min}, m, M) := 2 \cdot \frac{\lambda_{\min} + \sqrt{Mm} - \sqrt{(M - \lambda_{\min})(m - \lambda_{\min})}}{(\sqrt{M} + \sqrt{m})^{2}}.
|
| 403 |
+
$$
|
| 404 |
+
|
| 405 |
+
This is an interpolation between the standard and accelerated convergence rates of $O(\kappa \log(1/\varepsilon))$ and $O(\sqrt{\kappa} \log(1/\varepsilon))$ . Figure 4 shows the shape of $\phi$ for $m \in [\lambda_{\min}, M]$ , as it ranges from $\sim \sqrt{\kappa} \rightarrow \kappa$ .
|
| 406 |
+
|
| 407 |
+

|
| 408 |
+
Figure 4: Summary of the discussion in Section 4.2. Suboptimal decay times $\phi (\lambda_{\mathrm{min}} = 0.01,m,M = 1)$ interpolate between the standard and accelerated rates. Green curves correspond to settings of $m < \lambda_{\mathrm{min}}$ where Theorem 3 applies; notice the distorted horizontal scale.
|
| 409 |
+
|
| 410 |
+

|
| 411 |
+
|
| 412 |
+
# 4.3. Existence of clairvoyant non-adaptive schedules
|
| 413 |
+
|
| 414 |
+
Finally, we present one more view on the provable power of tuning (i.e. searching globally for) a learning rate schedule on a fixed problem instance. An ambitious benchmark is the conjugate gradient method (Hestenes & Stiefel, 1952), which is optimal for every (rather than the worst-case) choice of $A, b$ . That is, at iteration $t$ , it outputs
|
| 415 |
+
|
| 416 |
+
$$
|
| 417 |
+
x_{t + 1}:= \operatorname *{arg min}_{\substack{\deg p\leq t\\ p(0) = 1}}\| p(A)(x_{1} - x^{*})\|_{A},
|
| 418 |
+
$$
|
| 419 |
+
|
| 420 |
+
where $\| x\| _A\coloneqq \sqrt{x^\top Ax}$ . This can be much stronger than the guarantee from Theorem 3 (e.g. when the eigenvalues of $A$ are clustered). In Appendix E.3, we prove that there are non-adaptive (but instance-dependent) learning rate schedules that compete with conjugate gradient:
|
| 421 |
+
|
| 422 |
+
Theorem 15 (Conjugate gradient schedule; informal). For every problem instance $(A, b)$ , there is a learning rate schedule $\{\eta_t\}$ for gradient descent, with each $\eta_t \in [\frac{1}{\lambda_{\max}}, \frac{1}{\lambda_{\min}}]$ , such that $x_{\mathrm{out}}$ is the output of conjugate gradient.
|
| 423 |
+
|
| 424 |
+
# 5. Beyond convex quadratics
|
| 425 |
+
|
| 426 |
+
# 5.1. General convex objectives: a counterexample
|
| 427 |
+
|
| 428 |
+
A mysterious fact about acceleration is that some algorithms and analyses transfer from the quadratic case to general convex functions, while others do not. Lessard et al. (2016) exhibit a smooth and strongly convex non-quadratic $f$ for which Polyak's momentum gets stuck in a limit cycle.
|
| 429 |
+
|
| 430 |
+
For us, $f(x) = \log \cosh (x) + 0.01x^{2}$ serves as a one-dimensional "proof by simulation" that gradient descent with the fractal Chebyshev schedule can fail to converge. This is shown in Appendix F.2; note that this is a tiny instance of ridge logistic regression.
|
| 431 |
+
|
| 432 |
+
# 5.2. Non-convex objectives: a no-go
|
| 433 |
+
|
| 434 |
+
None of this theory carries over to worst-case non-convex $f$ : the analogue of Theorem 15 is vacuously strong. We point out that global optimization of the learning rate schedule is information-theoretically intractable.
|
| 435 |
+
|
| 436 |
+
Proposition 16 (Non-convex combination lock; informal). For every "passcode" $\{\eta_1^*,\ldots ,\eta_T^*\}$ and $\delta > 0$, there is a smooth non-convex optimization problem instance $(f(\cdot), x_{1})$ for which the final iterate $x_{\mathrm{out}}$ of gradient descent is a 1-approximate global minimum only if
|
| 437 |
+
|
| 438 |
+
$$
|
| 439 |
+
\left| \eta_ {t} - \eta_ {t} ^ {*} \right| \leq \delta , \quad \forall t = 1, \dots , T.
|
| 440 |
+
$$
|
| 441 |
+
|
| 442 |
+
A formal statement and proof are given in Appendix E.4.
|
| 443 |
+
|
| 444 |
+
# 5.3. More heuristic building blocks
|
| 445 |
+
|
| 446 |
+
With Polyak momentum as the most illustrious example, an optimizer can be very useful beyond its original theoretical scope. We present some more ideas for heuristic variants (unlike the theoretically justified ones from Section 4):
|
| 447 |
+
|
| 448 |
+
Cheap surrogates for the fractal schedule. The worst-case guarantees for Chebyshev methods depend sensitively on the choice of nodes. However, beyond worst-case objectives, it might suffice to replace $\{\gamma_t^{-1}\}$ with any similarly-shaped distribution (like the triangular one considered by Smith (2017)), and $\sigma$ with any sequence that sufficiently disperses the large steps. We show in Appendix E.5 that acceleration cannot arise from the simple cyclic schedule of Oymak (2021). An intriguing question is whether adaptive gradient methods or the randomness of SGD implicitly cause partial acceleration, alongside other proposed "side effect" mechanisms (Keskar et al., 2016; Jin et al., 2017; Staib et al., 2019).
|
| 449 |
+
|
| 450 |
+
Inserting slow steps. We can insert any number of steps $\eta \in [0, \frac{2}{M}]$ at any point in a schedule without worsening stability or convergence, because $\| (1 - \eta \lambda)\|_{[m,M]} \leq 1$. That is, $\| p_{s':t'}\|$ in the supersequence is bounded by the corresponding $\| p_{s:t}\|$ in the original schedule, and Theorems 9 and 10 apply. A special case of this is warmup or burn-in: take any number of small steps at the beginning.
|
| 451 |
+
|
| 452 |
+
Another option is to insert the small steps cyclically: notice from Propositions 6 (ii) and 7 (i) that the steps $\{\eta_t\}$ come in "fast-slow" pairs: an odd step overshoots, and an even step corrects it. This suggests further heuristics, like the following "Chebyshevian waltz": in minibatch SGD, run triplets of iterations with step sizes $(\eta_{2t - 1}, \eta_{2t}, \frac{1}{M})$.
|
| 453 |
+
|
| 454 |
+

|
| 455 |
+
Figure 5: Logistic regression/MNIST training loss curves. Left: Standard algorithms, with constant (more opaque = larger) learning rates. Right: A fractal Chebyshev schedule.
|
| 456 |
+
|
| 457 |
+

|
| 458 |
+
|
| 459 |
+

|
| 460 |
+
Figure 6: ResNet-18/CIFAR-10 training with batch size 8192 and a repeated $T = 8$ fractal Chebyshev schedule. Left: Training loss curves. Right: Learning rates; the schedule pokes through the edge of stability (magenta and red) without destabilizing training.
|
| 461 |
+
|
| 462 |
+

|
| 463 |
+
|
| 464 |
+
In theory, this degrades the worst-case convergence rate by a constant factor, but improves stability by a constant factor.
|
| 465 |
+
|
| 466 |
+
# 6. Experiments
|
| 467 |
+
|
| 468 |
+
# 6.1. Convex problems and non-local progress
|
| 469 |
+
|
| 470 |
+
In spite of the simple negative result in Section 5.1, we find that the fractal Chebyshev schedule can exhibit accelerated convergence beyond quadratic objectives. Figure 5 shows training curves for logistic regression on MNIST classification; details are in Appendix F.3. We leave a theoretical characterization of the schedule's acceleration properties on general convex functions to future work; this may require further assumptions on "natural" problem instances beyond minimax bounds.
|
| 471 |
+
|
| 472 |
+
# 6.2. Beyond the edge of stability in deep learning
|
| 473 |
+
|
| 474 |
+
We provide a small set of deep learning experiments, finding that the fractal Chebyshev schedule can overstep the empirical "edge of stability" (i.e. the largest constant multiplier on the learning rate for which training does not diverge). Figure 6 gives an overview of these findings; details are in Appendix F.4.
|
| 475 |
+
|
| 476 |
+
Estimating the scale of $\lambda_{\mathrm{max}}(\nabla^2 f)$ is an old paradigm for selecting learning rates (LeCun et al., 1992; Schaul et al., 2013).
|
| 477 |
+
|
| 478 |
+
There are many proposed mechanisms for the success of larger learning rates. Our theory (especially Theorem 14) and experiments point to the possibility of time-varying schedules to enable larger learning rates, on a much finer scale than cyclic restarts (Loshchilov & Hutter, 2016; Smith, 2017; Fu et al., 2019). A nascent line of work also challenges the classical $\eta_t \sim 1 / \lambda_{\max}$ wisdom from an empirical angle (Cohen et al., 2021), finding a phenomenon dubbed progressive sharpening during normal (smooth $\eta_t$) training.
|
| 479 |
+
End-to-end improvements on training benchmarks are outside the scope of this work: the learning rate schedule interacts with generalization (Jiang et al., 2020), batch normalization + weight decay (Li & Arora, 2019), batch size (Smith et al., 2018), adaptive preconditioners (Agarwal et al., 2020a) and now (from this work) acceleration. This adds yet one more perspective on why it is so difficult to standardize experimental controls and ablations in this space. Analogously, it has been proposed that momentum acts as a variance reduction mechanism (Li et al., 2017; Cutkosky & Orabona, 2019), alongside its classical role in acceleration.
|
| 480 |
+
As an invitation to try these ideas in various experimental settings, we provide in Appendix A some Python code to generate Chebyshev learning rates and fractal schedules.
|
| 481 |
+
|
| 482 |
+
# 7. Conclusion
|
| 483 |
+
|
| 484 |
+
We have revisited a lesser-known acceleration algorithm which uses a fractal learning rate schedule of reciprocal Chebyshev nodes, proved a stronger stability guarantee for its iterates, and developed some practical variants. Our experiments demonstrate promising empirical behaviors of the schedule beyond low-noise quadratics. We hope that this work provides new foundations towards investigating local optimization algorithms which take carefully scheduled "leaps of faith".
|
| 485 |
+
|
| 486 |
+
Open questions. We conclude with some natural follow-up questions for future work:
|
| 487 |
+
|
| 488 |
+
- Find "reasonable"6 (computationally efficient, oracle-efficient, and perturbation-stable) adaptive learning rate schedulers with accelerated convergence rates. What are the acceleration properties of commonly-used adaptive step size heuristics (Duchi et al., 2011; Kingma & Ba, 2014; Ward et al., 2019)?
|
| 489 |
+
- Do there exist learning rate schedules (adaptive or non-adaptive) which obtain the accelerated rate for general strongly convex $f$ , as opposed to only quadratics?
|
| 490 |
+
|
| 491 |
+
# Acknowledgments
|
| 492 |
+
|
| 493 |
+
We are grateful to Sham Kakade for helpful discussions and pointers to prior literature. Special thanks go to Maria Ratskevich for helping with the translation of (Lebedev & Finogenov, 1971).
|
| 494 |
+
|
| 495 |
+
# References
|
| 496 |
+
|
| 497 |
+
Aarts, E. and Korst, J. Simulated annealing and Boltzmann machines: a stochastic approach to combinatorial optimization and neural computing. John Wiley & Sons, Inc., 1989.
|
| 498 |
+
Agarwal, N., Anil, R., Hazan, E., Koren, T., and Zhang, C. Disentangling adaptive gradient methods from learning rates. arXiv preprint arXiv:2002.11803, 2020a.
|
| 499 |
+
Agarwal, N., Anil, R., Koren, T., Talwar, K., and Zhang, C. Stochastic optimization with laggard data pipelines. In Advances in Neural Information Processing Systems, volume 33, 2020b.
|
| 500 |
+
Aitken, A. C. XXV.—On Bernoulli's numerical solution of algebraic equations. Proceedings of the Royal Society of Edinburgh, 46:289-305, 1927.
|
| 501 |
+
Allen-Zhu, Z. and Hazan, E. Optimal black-box reductions between optimization objectives. arXiv preprint arXiv:1603.05642, 2016.
|
| 502 |
+
Allen-Zhu, Z. and Orecchia, L. Linear coupling: An ultimate unification of gradient and mirror descent. arXiv preprint arXiv:1407.1537, 2014.
|
| 503 |
+
Anderson, D. G. Iterative procedures for nonlinear integral equations. Journal of the ACM (JACM), 12(4):547-560, 1965.
|
| 504 |
+
Bach, F. Machine learning research blog, 2020. URL https://francisbach.com/acceleration-without-pain/.
|
| 505 |
+
Barre, M., Taylor, A., and d'Aspremont, A. Complexity guarantees for polyak steps with momentum. In Conference on Learning Theory, pp. 452-478. PMLR, 2020.
|
| 506 |
+
Bello, I., Zoph, B., Vasudevan, V., and Le, Q. V. Neural optimizer search with reinforcement learning. In International Conference on Machine Learning, pp. 459-468. PMLR, 2017.
|
| 507 |
+
Bottou, L. and Bousquet, O. The tradeoffs of large scale learning. In Proceedings of the 20th International Conference on Neural Information Processing Systems, pp. 161-168, 2007.
|
| 508 |
+
|
| 509 |
+
Bousquet, O. and Elisseeff, A. Stability and generalization. The Journal of Machine Learning Research, 2:499-526, 2002.
|
| 510 |
+
Boyd, S. and Vandenberghe, L. Convex optimization. Cambridge University Press, 2004.
|
| 511 |
+
Brown, T. B., Mann, B., Ryder, N., Subbiah, M., Kaplan, J., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.
|
| 512 |
+
Bubeck, S. Convex optimization: Algorithms and complexity. Foundations and Trends in Machine Learning, 8, 2017.
|
| 513 |
+
Bubeck, S. Nemirovski's acceleration (blog post), 2019. URL https://blogs.princeton.edu/imabandit/2019/01/09/nemirovskis-acceleration/.
|
| 514 |
+
Bubeck, S., Lee, Y. T., and Singh, M. A geometric alternative to nesterov's accelerated gradient descent. arXiv preprint arXiv:1506.08187, 2015.
|
| 515 |
+
Bubeck, S., Jiang, Q., Lee, Y. T., Li, Y., and Sidford, A. Near-optimal method for highly smooth convex optimization. In Conference on Learning Theory, pp. 492-507. PMLR, 2019.
|
| 516 |
+
Chebyshev, P. L. Theorie des mécanismes connus sous le nom de parallelogrammes. Imprimerie de l'Académie impérièale des sciences, 1853.
|
| 517 |
+
Chen, Y., Jin, C., and Yu, B. Stability and convergence tradeoff of iterative optimization algorithms. arXiv preprint arXiv:1804.01619, 2018.
|
| 518 |
+
Choi, D., Passos, A., Shallue, C. J., and Dahl, G. E. Faster neural network training with data echoing. arXiv preprint arXiv:1907.05550, 2019.
|
| 519 |
+
Cohen, J., Kaur, S., Li, Y., Kolter, J. Z., and Talwalkar, A. Gradient descent on neural networks typically occurs at the edge of stability. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=jh-rTtvkGeM.
|
| 520 |
+
Cutkosky, A. and Orabona, F. Black-box reductions for parameter-free online learning in banach spaces. In _Conference On Learning Theory_, pp. 1493-1529. PMLR, 2018.
|
| 521 |
+
Cutkosky, A. and Orabona, F. Momentum-based variance reduction in non-convex sgd. arXiv preprint arXiv:1905.10018, 2019.
|
| 522 |
+
d'Aspremont, A., Scieur, D., and Taylor, A. Acceleration methods. arXiv preprint arXiv:2101.09545, 2021.
|
| 523 |
+
|
| 524 |
+
Devolder, O., Glineur, F., and Nesterov, Y. First-order methods of smooth convex optimization with inexact oracle. Mathematical Programming, 146(1):37-75, 2014.
|
| 525 |
+
Dozat, T. Incorporating nesterov momentum into adam. 2016.
|
| 526 |
+
Duchi, J., Hazan, E., and Singer, Y. Adaptive subgradient methods for online learning and stochastic optimization. Journal of machine learning research, 12(7), 2011.
|
| 527 |
+
Flanders, D. A. and Shortley, G. Numerical determination of fundamental modes. Journal of Applied Physics, 21 (12):1326-1332, 1950.
|
| 528 |
+
Fu, H., Li, C., Liu, X., Gao, J., Celikyilmaz, A., and Carin, L. Cyclical annealing schedule: A simple approach to mitigating kl vanishing. arXiv preprint arXiv:1903.10145, 2019.
|
| 529 |
+
Gavurin, M. K. The use of polynomials of best approximation for improving the convergence of iterative processes. Uspekhi Matematicheskikh Nauk, 5(3):156-160, 1950.
|
| 530 |
+
Ge, R., Kakade, S. M., Kidambi, R., and Netrapalli, P. The step decay schedule: A near optimal, geometrically decaying learning rate procedure for least squares. Advances in Neural Information Processing Systems, 32: 14977-14988, 2019.
|
| 531 |
+
Giladi, N., Nacson, M. S., Hoffer, E., and Soudry, D. At stability's edge: How to adjust hyperparameters to preserve minima selection in asynchronous training of neural networks? In International Conference on Learning Representations, 2019.
|
| 532 |
+
Gorbunov, E., Hanzely, F., and Richtárik, P. A unified theory of sgd: Variance reduction, sampling, quantization and coordinate descent. In International Conference on Artificial Intelligence and Statistics, pp. 680-690. PMLR, 2020.
|
| 533 |
+
Gottlieb, D. and Orszag, S. A. Numerical analysis of spectral methods: theory and applications. SIAM, 1977.
|
| 534 |
+
Hardt, M., Recht, B., and Singer, Y. Train faster, generalize better: Stability of stochastic gradient descent. In International Conference on Machine Learning, pp. 1225-1234. PMLR, 2016.
|
| 535 |
+
Hazan, E. and Kakade, S. Revisiting the polyak step size. arXiv preprint arXiv:1905.00313, 2019.
|
| 536 |
+
He, K., Zhang, X., Ren, S., and Sun, J. Identity mappings in deep residual networks. In European conference on computer vision, pp. 630-645. Springer, 2016.
|
| 537 |
+
|
| 538 |
+
Hestenes, M. R. and Stiefel, E. Methods of conjugate gradients for solving linear systems. Journal of Research of the National Bureau of Standards, 49(6), 1952.
|
| 539 |
+
Higham, N. J. Accuracy and stability of numerical algorithms. SIAM, 2002.

Jiang, Z., Zhang, C., Talwar, K., and Mozer, M. C. Characterizing structural regularities of labeled data in overparameterized models. arXiv e-prints, 2020.

Jin, C., Ge, R., Netrapalli, P., Kakade, S. M., and Jordan, M. I. How to escape saddle points efficiently. In International Conference on Machine Learning, pp. 1724-1732. PMLR, 2017.

Kelley, C. T. Iterative methods for optimization. SIAM, 1999.

Keskar, N. S., Mudigere, D., Nocedal, J., Smelyanskiy, M., and Tang, P. T. P. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016.

Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.

Lan, G. An optimal method for stochastic composite optimization. Mathematical Programming, 133(1-2):365-397, 2012.

Lebedev, V. and Finogenov, S. Solution of the parameter ordering problem in Chebyshev iterative methods. USSR Computational Mathematics and Mathematical Physics, 13(1):21-41, 1973.

Lebedev, V. and Finogenov, S. Utilization of ordered Chebyshev parameters in iterative methods. USSR Computational Mathematics and Mathematical Physics, 16(4):70-83, 1976.

Lebedev, V. and Finogenov, S. On construction of the stable permutations of parameters for the Chebyshev iterative methods. Part I. Russian Journal of Numerical Analysis and Mathematical Modelling, 17(5):437-456, 2002.

Lebedev, V. and Finogenov, S. On construction of the stable permutations of parameters for the Chebyshev iterative methods. Part II. Russian Journal of Numerical Analysis and Mathematical Modelling, 19(3):251-263, 2004.

Lebedev, V. I. and Finogenov, S. The order of choice of the iteration parameters in the cyclic Chebyshev iteration method. Zhurnal Vychislitel'noi Matematiki i Matematicheskoi Fiziki, 11(2):425-438, 1971.

LeCun, Y., Boser, B., Denker, J. S., Henderson, D., Howard, R. E., Hubbard, W., and Jackel, L. D. Backpropagation applied to handwritten zip code recognition. Neural Computation, 1(4):541-551, 1989.

LeCun, Y., Simard, P. Y., and Pearlmutter, B. Automatic learning rate maximization by on-line estimation of the Hessian's eigenvectors. In Proceedings of the 5th International Conference on Neural Information Processing Systems, pp. 156-163, 1992.

Lessard, L., Recht, B., and Packard, A. Analysis and design of optimization algorithms via integral quadratic constraints. SIAM Journal on Optimization, 26(1):57-95, 2016.

Li, Q., Tai, C., and Weinan, E. Stochastic modified equations and adaptive stochastic gradient algorithms. In International Conference on Machine Learning, pp. 2101-2110. PMLR, 2017.

Li, Y., Wei, C., and Ma, T. Towards explaining the regularization effect of initial large learning rate in training neural networks. arXiv preprint arXiv:1907.04595, 2019.

Li, Z. and Arora, S. An exponential learning rate schedule for deep learning. arXiv preprint arXiv:1910.07454, 2019.

Li, Z. and Li, J. A fast Anderson-Chebyshev acceleration for nonlinear optimization. In International Conference on Artificial Intelligence and Statistics, pp. 1047-1057. PMLR, 2020.

Li, Z., Lyu, K., and Arora, S. Reconciling modern deep learning with traditional optimization analyses: The intrinsic learning rate. arXiv preprint arXiv:2010.02916, 2020.

Lin, H., Mairal, J., and Harchaoui, Z. Catalyst acceleration for first-order convex optimization: From theory to practice. Journal of Machine Learning Research, 18(1):7854-7907, 2018.

Liu, D. C. and Nocedal, J. On the limited memory BFGS method for large scale optimization. Mathematical Programming, 45(1):503-528, 1989.

Loshchilov, I. and Hutter, F. SGDR: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016.

Mason, J. C. and Handscomb, D. C. Chebyshev polynomials. CRC Press, 2002.

Monteiro, R. D. and Svaiter, B. F. An accelerated hybrid proximal extragradient method for convex optimization and its implications to second-order methods. SIAM Journal on Optimization, 23(2):1092-1125, 2013.

Nesterov, Y. Accelerating the cubic regularization of Newton's method on convex problems. Mathematical Programming, 112(1):159-181, 2008.

Nesterov, Y. E. A method of solving a convex programming problem with convergence rate $O(1/k^2)$. In Doklady Akademii Nauk, volume 269, pp. 543-547. Russian Academy of Sciences, 1983.

Orabona, F. and Tommasi, T. Training deep networks without learning rates through coin betting. In Proceedings of the 31st International Conference on Neural Information Processing Systems, pp. 2157-2167, 2017.

Oymak, S. Super-convergence with an unstable learning rate. arXiv preprint arXiv:2102.10734, 2021.

O'Donoghue, B. and Candes, E. Adaptive restart for accelerated gradient schemes. Foundations of Computational Mathematics, 15(3):715-732, 2015.

Pedregosa, F. and Scieur, D. Acceleration through spectral density estimation. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 7553-7562. PMLR, 2020. URL http://proceedings.mlr.press/v119/pedregosa20a.html.

Polyak, B. Some methods of speeding up the convergence of iteration methods. USSR Computational Mathematics and Mathematical Physics, 4(5):1-17, 1964a. ISSN 0041-5553.

Polyak, B. T. Some methods of speeding up the convergence of iteration methods. USSR Computational Mathematics and Mathematical Physics, 4(5):1-17, 1964b.

Polyak, B. T. Introduction to Optimization. Optimization Software, Inc., Publications Division, New York, 1987.

Richardson, L. F. The approximate arithmetical solution by finite differences of physical problems involving differential equations, with an application to the stresses in a masonry dam. Philosophical Transactions of the Royal Society of London. Series A, Containing Papers of a Mathematical or Physical Character, 210(459-470):307-357, 1911.

Sachdeva, S. and Vishnoi, N. K. Faster algorithms via approximation theory. Theoretical Computer Science, 9(2):125-210, 2013.

Schaul, T., Zhang, S., and LeCun, Y. No more pesky learning rates. In International Conference on Machine Learning, pp. 343-351. PMLR, 2013.

Scieur, D. and Pedregosa, F. Universal asymptotic optimality of Polyak momentum. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 8565-8572. PMLR, 2020. URL http://proceedings.mlr.press/v119/scieur20a.html.

Shallue, C. J., Lee, J., Antognini, J., Sohl-Dickstein, J., Frostig, R., and Dahl, G. E. Measuring the effects of data parallelism on neural network training. Journal of Machine Learning Research, 20:1-49, 2019.

Sidi, A., Ford, W. F., and Smith, D. A. Acceleration of convergence of vector sequences. SIAM Journal on Numerical Analysis, 23(1):178-196, 1986.

Smith, L. N. Cyclical learning rates for training neural networks. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 464-472. IEEE, 2017.

Smith, S. L., Kindermans, P.-J., Ying, C., and Le, Q. V. Don't decay the learning rate, increase the batch size. In International Conference on Learning Representations, 2018.

Staib, M., Reddi, S., Kale, S., Kumar, S., and Sra, S. Escaping saddle points with adaptive gradient methods. In International Conference on Machine Learning, pp. 5956-5965. PMLR, 2019.

Stiefel, E. L. Kernel polynomials in linear algebra and their numerical applications. NBS Applied Math. Ser., 49:1-22, 1958.

Su, W., Boyd, S. P., and Candes, E. J. A differential equation for modeling Nesterov's accelerated gradient method: Theory and insights. In NIPS, volume 14, pp. 2510-2518, 2014.

Sutskever, I., Martens, J., Dahl, G., and Hinton, G. On the importance of initialization and momentum in deep learning. In International Conference on Machine Learning, pp. 1139-1147. PMLR, 2013.

Vishnoi, N. K. Laplacian solvers and their algorithmic applications. Theoretical Computer Science, 8(1-2):1-141, 2012.

Ward, R., Wu, X., and Bottou, L. AdaGrad stepsizes: Sharp convergence over nonconvex landscapes. In International Conference on Machine Learning, pp. 6677-6686. PMLR, 2019.

Wibisono, A. and Wilson, A. C. On accelerated methods in optimization. arXiv preprint arXiv:1509.03616, 2015.

Wynn, P. On a device for computing the $e_m(S_n)$ transformation. Mathematical Tables and Other Aids to Computation, pp. 91-96, 1956.

You, Y., Li, J., Reddi, S., Hseu, J., Kumar, S., Bhojanapalli, S., Song, X., Demmel, J., Keutzer, K., and Hsieh, C.-J. Large batch optimization for deep learning: Training BERT in 76 minutes. arXiv preprint arXiv:1904.00962, 2019.

Young, D. On Richardson's method for solving linear systems with positive definite matrices. Journal of Mathematics and Physics, 32(1-4):243-255, 1953.

Zhang, G., Li, L., Nado, Z., Martens, J., Sachdeva, S., Dahl, G. E., Shallue, C. J., and Grosse, R. Which algorithmic choices matter at which batch sizes? Insights from a noisy quadratic model. arXiv preprint arXiv:1907.04164, 2019.
|
accelerationviafractallearningrateschedules/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b655bb8bcd9c10e7dd56cbe751afddb98aa42cf381530c4d2b3c381ee0f5de10
+size 313457

accelerationviafractallearningrateschedules/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f18ef6648c3205ed35d7e2ea460fea0a3495b4ce0000adfc85ede34e4e55143b
+size 707552

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a363780105f44273befd57ae0b3fcae9d766b9f1fdeabdd3c5bfbaa589f3ea0
+size 95333

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70ed344055c419f47d69a346e016d666908372968b14df9f78fb784b6788e824
+size 110476

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/96c69955-85e8-43e3-8f6d-709b44b2162d_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05d808e9676ed44da39c258b63db951a25593a22e415d1eebc7adffa44809625
+size 2056965

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/full.md ADDED
@@ -0,0 +1,446 @@
| 1 |
+
# Accumulated Decoupled Learning with Gradient Staleness Mitigation for Convolutional Neural Networks
|
| 2 |
+
|
| 3 |
+
Huiping Zhuang<sup>1</sup> Zhenyu Weng<sup>1</sup> Fulin Luo<sup>1</sup> Kar-Ann Toh<sup>2</sup> Haizhou Li<sup>3</sup> Zhiping Lin<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
Gradient staleness is a major side effect in decoupled learning when training convolutional neural networks asynchronously. Existing methods that ignore this effect might result in reduced generalization and even divergence. In this paper, we propose an accumulated decoupled learning (ADL), which includes a module-wise gradient accumulation in order to mitigate the gradient staleness. Unlike prior arts ignoring the gradient staleness, we quantify the staleness in such a way that its mitigation can be quantitatively visualized. As a new learning scheme, the proposed ADL is theoretically shown to converge to critical points in spite of its asynchronism. Extensive experiments on CIFAR-10 and ImageNet datasets are conducted, demonstrating that ADL gives promising generalization results while the state-of-the-art methods experience reduced generalization and divergence. In addition, our ADL is shown to have the fastest training speed among the compared methods. The code will be ready soon in https://github.com/ZHUANGHP/Accumulated-Decoupled-Learning.git.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Convolutional neural networks (CNNs) (LeCun et al., 1998) are normally trained by backpropagation (BP) which runs a forward pass followed by a backward one synchronously through a network before parameter update. The synchronism of BP is mainly characterized by three lockings (Jaderberg et al., 2016), i.e., the forward, the backward and the update lockings (see details in Section 3.1), which render the majority of a network idle during training. To improve the
|
| 12 |
+
|
| 13 |
+
$^{1}$ School of Electrical and Electronic Engineering, Nanyang Technological University. $^{2}$ Department of Electrical and Electronic Engineering, Yonsei University. $^{3}$ School of Electrical and Computer Engineering, National University of Singapore. Correspondence to: Zhiping Lin <ezplin@ntu.edu.sg>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $38^{th}$ International Conference on Machine Learning, PMLR 139, 2021. Copyright 2021 by the author(s).
|
| 16 |
+
|
| 17 |
+
efficiency, the decoupled learning (Jaderberg et al., 2016) emerges by addressing one or more of these lockings.
|
| 18 |
+
|
| 19 |
+
The decoupled learning partitions a network depth-wise into several modules with each module containing a stack of layers, and facilitates a parallel training among the partitioned modules. A larger number of partitioned modules allows a greater efficiency gain. For convenience, the partition size (PS) is taken as the number of modules being partitioned from a network. Methods of decoupled learning seek to deliver comparable generalization performance to their BP counterparts under various PS. We divide these prior arts into two groups: the delayed gradient (DG) based methods and the local error learning (LEL) based methods.
|
| 20 |
+
|
| 21 |
+
The DG-based methods (Huo et al., 2018a;b; Zhuang et al., 2021) adopt DGs to avoid the synchronism of BP. They deliver comparable generalization performance relative to the BP baseline, but introduce the gradient staleness (or the stale gradient effect). Such a staleness has limited the DG-based methods towards a small PS. That is, most DG-based methods only perform well for $\mathrm{PS} \leq 4$ (Huo et al., 2018a;b). This is because the gradient staleness grows as PS increases, causing a reduced generalization or even divergence.
|
| 22 |
+
|
| 23 |
+
The staleness issue does not appear in the LEL-based methods as they sever the gradient flow between adjacent modules by building auxiliary networks to generate local error gradients. This allows the modules to avoid the global BP thereby addressing all the three lockings. Methods in this group are distinguished by the design of auxiliary networks. The LEL-based methods do not experience divergence as PS increases, but usually encounter generalization loss instead.
|
| 24 |
+
|
| 25 |
+
In summary, the DG-based methods can give better generalization results than LEL-methods but they are constrained by a limited PS due to the staleness issue. Dwelling on the DG-based methods, we aim to scale the learning to a large PS with comparable generalization performance (see Table 1). To this end, the key is to reduce the gradient staleness. Here, we propose an accumulated decoupled learning (ADL), which effectively incorporates a module-wise gradient accumulation (mGA) technique in the partitioned modules to mitigate the gradient staleness. This work includes the following contributions:
|
| 26 |
+
|
| 27 |
+
- Proposal of a new decoupled learning technique by addressing all the three locking problems in BP.
|
| 28 |
+
- Incorporation of an mGA technique into the decoupled learning, which has been shown to reduce the gradient staleness, theoretically and empirically.
|
| 29 |
+
- Convergence analysis which shows that our method can converge to critical points.
|
| 30 |
+
- Experimental validation on CIFAR-10 and ImageNet datasets. Particularly, we show that the ADL in general outperforms the state-of-the-arts especially under the scenario of a large PS, and has the fastest training speed among the compared methods.
|
| 31 |
+
|
| 32 |
+
# 2. Related Works
|
| 33 |
+
|
| 34 |
+
# 2.1. Local Error Learning Based Methods
|
| 35 |
+
|
| 36 |
+
The key feature of LEL-based methods is the design of auxiliary networks. They originate from the decoupled neural interface (DNI) (Jaderberg et al., 2016) by generating local synthetic gradients. This approach is followed up by (Mostafa et al., 2018) using a local classifier. The decoupled greedy learning (DGL) (Belilovsky et al., 2020) designs a light-weight auxiliary network for the purpose of making a trade-off between the generalization performance and the computation workload. Although the pred-sim method in (Nøkland & Eidnes, 2019) with combined losses manages to obtain a comparable performance with the BP baseline, it has only been verified in relatively shallow networks ( $\leq 13$ layers).
|
| 37 |
+
|
| 38 |
+
# 2.2. Delayed Gradient Based Methods
|
| 39 |
+
|
| 40 |
+
The DG-based methods attain decoupled learning by updating the network modules with DGs. The decoupled parallel BP with DGs (DDG) (Huo et al., 2018b) breaks the backward locking while having a comparable performance to BP on the ResNet (RN) (He et al., 2016) under a small PS ( $\leq 4$ ). The future replay (FR) (Huo et al., 2018a) that follows up employs a recomputation unit with inconsistent weights and gradients during the forward and the backward passes. The FR also unlocks the backward pass and this gives as equally good performance as the BP baseline for small PS values. The fully decoupled method with DGs (Zhuang et al., 2021) further addresses the forward and the update lockings, and this leads to a lock-free decoupled learning. Apart from these, a technique called DSP (Xu et al., 2020) has also attained a lock-free decoupled learning. However, these prior arts using DGs inevitably suffer from the gradient staleness, which becomes apparent as the PS grows.
|
| 41 |
+
|
| 42 |
+
Table 1. Characteristics of decoupled learning methods and the proposed ADL.
|
| 43 |
+
|
| 44 |
+
<table><tr><td>Methods</td><td>DG-based methods</td><td>LEL-based methods</td><td>ADL (ours)</td></tr><tr><td>Generalization v.s BP</td><td>comparable</td><td>worse</td><td>comparable</td></tr><tr><td>Partition size</td><td>small</td><td>large</td><td>large</td></tr></table>
|
| 45 |
+
|
| 46 |
+
# 2.3. Other Works Related to Decoupled Learning
|
| 47 |
+
|
| 48 |
+
Another set of methods, named asynchronous stochastic gradient descent (ASGD) (Dean et al., 2012; Lian et al., 2015; Zheng et al., 2017), involves utilization of DGs to perform asynchronous parallelization. Essentially, the ASGD methods differ from the decoupled learning in terms of parallelization paradigm. They belong to the data parallelism category as each work handles a complete network replica. In addition, the pipeline BP shares certain similarity with the decoupled learning. For instance, the GPipe proposed in (Huang et al., 2019) splits a network into several modules and breaks each mini-batch into several micro-batches to conduct pipeline model parallelization. The micro-batching shares some similarities with the mGA technique in our proposed ADL. However, unlike decoupled learning, the GPipe is a form of synchronous model parallelism, which does not involve DGs.
|
| 49 |
+
|
| 50 |
+
# 3. Preliminaries
|
| 51 |
+
|
| 52 |
+
Here, we revisit the necessary knowledge for training a feedforward neural network, including the generic gradient accumulation (gGA) used in BP learning. Along with this revisit, the BP lockings (Jaderberg et al., 2016) as well as the gradient staleness are also explained.
|
| 53 |
+
|
| 54 |
+
# 3.1. Backpropagation and Lockings
|
| 55 |
+
|
| 56 |
+
Assume that we need to train an $\mathcal{L}$ -layer network. The $l^{\mathrm{th}}$ $(1\leq l\leq \mathcal{L})$ layer produces an activation $z_{l} = F_{l}(z_{l - 1};\pmb{\theta}_{l})$ by taking $z_{l - 1}$ as its input, where $F_{l}$ is an activation function and $\pmb{\theta}_l\in \mathbb{R}^{n_l}$ is weight vector at layer $l$ . The sequential generation of the activations results in a forward locking since $z_{l}$ depends on the activations from its previous layers. Let $\pmb {\theta} = [\pmb{\theta}_1^T,\pmb{\theta}_2^T,\dots,\pmb{\theta}_\mathcal{L}^T ]^T\in \mathbb{R}^{\Sigma_{i = 1}^\mathcal{L}n_i}$ denote the parameter vector of the entire network. Assume that $f$ is a loss function. Training the feedforward network can then be formulated as
|
| 57 |
+
|
| 58 |
+
$$
|
| 59 |
+
\underset {\theta} {\text {m i n i m i z e}} \quad f _ {x} (\theta) \tag {1}
|
| 60 |
+
$$
|
| 61 |
+
|
| 62 |
+
where $\pmb{x}$ represents the entire input-label information (or the entire dataset). In the rest of this paper, we shall use $f(\pmb{\theta})$ to represent $f_{\pmb{x}}(\pmb{\theta})$ for convenience.
|
| 63 |
+
|
| 64 |
+
The gradient descent algorithm is often used to solve Eq. (1) by updating the parameter $\theta$ iteratively as follows:
|
| 65 |
+
|
| 66 |
+
$$
|
| 67 |
+
\boldsymbol {\theta} ^ {t + 1} = \boldsymbol {\theta} ^ {t} - \gamma_ {t} \bar {\boldsymbol {g}} _ {\theta} ^ {t} \tag {2}
|
| 68 |
+
$$
|
| 69 |
+
|
| 70 |
+
or equivalently,
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\boldsymbol {\theta} _ {l} ^ {t + 1} = \boldsymbol {\theta} _ {l} ^ {t} - \gamma_ {t} \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {t}, l = 1, \dots , \mathcal {L} \tag {3}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
where $\gamma_{t}$ is the learning rate. Index $t$ here usually implies the batch index, with $\bar{\pmb{g}}_{\pmb{\theta}_l}^t$ indicating the gradient obtained with respect to (w.r.t.) data batch $t$ . Let $\bar{\pmb{g}}_{\theta}^{t} = [(\bar{\pmb{g}}_{\pmb{\theta}_{1}}^{t})^{T},(\bar{\pmb{g}}_{\pmb{\theta}_{2}}^{t})^{T},\dots,(\bar{\pmb{g}}_{\pmb{\theta}_{\mathcal{L}}}^{t})^{T}]^{T}\in \mathbb{R}^{\Sigma_{i = 1}^{\mathcal{L}}n_{i}}$ , which is obtained by
|
| 77 |
+
|
| 78 |
+
$$
|
| 79 |
+
\bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {t} = \frac {\partial f (\boldsymbol {\theta} ^ {t})}{\partial \boldsymbol {\theta} _ {l} ^ {t}}. \tag {4}
|
| 80 |
+
$$
|
| 81 |
+
|
| 82 |
+
If the dataset is large, the stochastic gradient descent (SGD) is often used as an alternative:
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t} = \frac {\partial f _ {\boldsymbol {x} _ {t}} \left(\boldsymbol {\theta} ^ {t}\right)}{\partial \boldsymbol {\theta} _ {l} ^ {t}} \tag {5}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
where $\boldsymbol{x}_t$ is the $t^{\text{th}}$ mini-batch drawn from the dataset $\boldsymbol{x}$. We drop the bar on $\boldsymbol{g}$ to distinguish this stochastic gradient from the full-batch gradient in Eq. (4). Accordingly, the network weights can be updated through
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
\boldsymbol {\theta} _ {l} ^ {t + 1} = \boldsymbol {\theta} _ {l} ^ {t} - \gamma_ {t} \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t}, l = 1, \dots , \mathcal {L}. \tag {6}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
Assume that each sample is randomly drawn from a uniform distribution. Then the gradient is unbiased:
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
\mathbb {E} _ {\boldsymbol {x}} \left\{\boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t} \right\} = \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {t} \tag {7}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
where the expectation $\mathbb{E}_{\boldsymbol{x}}$ is taken w.r.t. the random variable that draws $x_{t}$ from the dataset.
|
| 101 |
+
|
| 102 |
+
To obtain the gradient vectors, the BP is used. We can calculate the gradient at layer $l$ using the gradient backpropagated from layers $j$ and $i$ ( $l < j < i$ ) as follows:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
\boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t} = \frac {\partial f _ {\boldsymbol {x} _ {t}} (\boldsymbol {\theta} ^ {t})}{\partial \boldsymbol {\theta} _ {l} ^ {t}} = \frac {\partial \boldsymbol {z} _ {j} ^ {t}}{\partial \boldsymbol {\theta} _ {l} ^ {t}} \frac {\partial f _ {\boldsymbol {x} _ {t}} (\boldsymbol {\theta} ^ {t})}{\partial \boldsymbol {z} _ {j} ^ {t}} = \frac {\partial \boldsymbol {z} _ {j} ^ {t}}{\partial \boldsymbol {\theta} _ {l} ^ {t}} \boldsymbol {g} _ {\boldsymbol {z} _ {j}} ^ {t} \tag {8}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
where
|
| 109 |
+
|
| 110 |
+
$$
|
| 111 |
+
\boldsymbol {g} _ {\boldsymbol {z} _ {j}} ^ {t} = \frac {\partial f _ {\boldsymbol {x} _ {t}} (\boldsymbol {\theta} ^ {t})}{\partial \boldsymbol {z} _ {j} ^ {t}} = \frac {\partial \boldsymbol {z} _ {i} ^ {t}}{\partial \boldsymbol {z} _ {j} ^ {t}} \frac {\partial f _ {\boldsymbol {x} _ {t}} (\boldsymbol {\theta} ^ {t})}{\partial \boldsymbol {z} _ {i} ^ {t}} = \frac {\partial \boldsymbol {z} _ {i} ^ {t}}{\partial \boldsymbol {z} _ {j} ^ {t}} \boldsymbol {g} _ {\boldsymbol {z} _ {i}} ^ {t}. \tag {9}
|
| 112 |
+
$$
|
| 113 |
+
|
| 114 |
+
Here we introduce $g_{z_j}^t$ — the gradient vector w.r.t. activation $z_j$ — because it travels through modules for communication in our ADL. Eq. (8) and Eq. (9) indicate that $g_{\theta_l}^t$ is obtained based on $g_{z_j}^t$ and $g_{z_i}^t$ . That is, the gradient is not accessible before the forward pass has been completed and all the dependent gradients have been obtained, which is known as the backward locking. On the other hand, we cannot update the weights before every layer finishes its forward pass, which is recognized as the update locking.
|
| 115 |
+
|
| 116 |
+
# 3.2. Learning with Generic Gradient Accumulation
|
| 117 |
+
|
| 118 |
+
The gGA has frequently been adopted to increase the minibatch size for training networks on devices with a relatively limited memory setting. The gradients obtained based on several mini-batches are accumulated before they are finally applied to update the network.
|
| 119 |
+
|
| 120 |
+
To describe the training development involving the gGA technique, we introduce an update index $s$ , and a wrapped batch index $U_{s}$ w.r.t. the original batch index $t$ . We use the update index $s$ to indicate the $s^{\mathrm{th}}$ parameter update of the network. It is connected to $U_{s}$ in the way of $U_{s} = Ms$ given $M$ gGA steps. The gGA renders the network parameters unchanged for $M$ steps, i.e., $\theta_l^{U_s} = \theta_l^{U_s + 1} = \dots = \theta_l^{U_s + M - 1}$ . Conversely, we can tell the update index from a batch index $t$ by
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
s = \lfloor t / M \rfloor \tag {10}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
where $\lfloor x\rfloor = \max \{n\in \mathbb{Z}|n\leq x\}$ is the floor operator. That is, when the network is processing the $t^{\mathrm{th}}$ mini-batch of data, the network has been updated for $s$ times based on Eq. (10).
|
| 127 |
+
|
| 128 |
+
Assume that the gradients w.r.t. batch indexes $t = U_s, U_s + 1, \ldots, U_s + M - 1$ are accumulated. These gradients are obtained through
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t} = \frac {\partial f _ {\boldsymbol {x} _ {t}} \left(\boldsymbol {\theta} ^ {t}\right)}{\partial \boldsymbol {\theta} _ {l} ^ {U _ {s}}} \tag {11}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
where parameter $\pmb{\theta}_l^{U_s} = \pmb{\theta}_l^{U_{\lfloor t / M \rfloor}}$ is adopted instead of Eq. (5) in order to emphasize that the gradients w.r.t. these data batches are all obtained based on the same parameter. Using the gGA technique, the weights are updated as follows:
|
| 135 |
+
|
| 136 |
+
$$
|
| 137 |
+
\boldsymbol {\theta} _ {l} ^ {U _ {s + 1}} = \boldsymbol {\theta} _ {l} ^ {U _ {s}} - \gamma_ {s} (1 / M) \sum_ {j = 0} ^ {M - 1} \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j}. \tag {12}
|
| 138 |
+
$$
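To make the gGA update of Eq. (12) concrete, here is a minimal PyTorch-style sketch (our illustration, not code from the paper); `model`, `loss_fn`, `loader`, and the SGD settings are placeholder assumptions, and `M` is the number of accumulation steps:

```python
import torch

def train_with_gga(model, loss_fn, loader, M=4, lr=0.1):
    """Generic gradient accumulation (gGA): collect M mini-batch gradients
    on the same parameters, then apply one averaged update (Eq. (12))."""
    opt = torch.optim.SGD(model.parameters(), lr=lr)
    opt.zero_grad()
    for t, (x, y) in enumerate(loader):
        # Gradients for batches t = U_s, ..., U_s + M - 1 are all taken
        # w.r.t. the same parameters theta^{U_s}, as in Eq. (11).
        loss = loss_fn(model(x), y) / M   # the 1/M factor of Eq. (12)
        loss.backward()                   # gradients accumulate in .grad
        if (t + 1) % M == 0:              # update index s = floor(t / M), Eq. (10)
            opt.step()                    # produces theta^{U_{s+1}}
            opt.zero_grad()
```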
|
| 139 |
+
|
| 140 |
+
# 3.3. Gradient Staleness
|
| 141 |
+
|
| 142 |
+
The network is commonly updated with gradients obtained w.r.t. the current parameters. However, there are certain scenarios where the network has to update its parameters with gradients calculated based on "older" parameters. This introduces gradient staleness or stale gradient effect, as the gradients are not up-to-date, and are therefore less accurate.
|
| 143 |
+
|
| 144 |
+
We define the level of staleness (LoS) as the update index difference between the current parameter and the parameter used to calculate the stale gradient. Suppose a network is updated through
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
\boldsymbol {\theta} _ {l} ^ {t + 1} = \boldsymbol {\theta} _ {l} ^ {t} - \gamma_ {t} \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {t - d}, l = 1, \dots , \mathcal {L} \tag {13}
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
where $\pmb{g}_{\pmb{\theta}_l}^{t - d} = \partial f_{\pmb{x}_{t - d}}(\pmb{\theta}^{t - d}) / \partial \pmb{\theta}_l^{t - d}$ is a delayed gradient. For a gGA step of $M$, the LoS can be calculated through
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\operatorname {L o S} = \left\lfloor t / M \right\rfloor - \left\lfloor (t - d) / M \right\rfloor \tag {14}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+

|
| 157 |
+
Figure 1. Training a 3-layer network by ADL with $K = 3$ and mGA steps of (a) $M = 2$ and (b) $M = 4$ suggesting that each module is updated every 2 and 4 iterations respectively. Note that there is a batch index difference of $2(K - k)$ between the forward and backward pass. For instance, for $K = 3$ , module $k = 1$ (gray) generates an activation at iteration 1 but the gradient of this batch arrives at iteration 5. In addition, a larger $M$ reduces the LoS according to Eq. (14). For instance, at $t = 5$ module 1 (gray) has LoS of 2 with $M = 2$ , which is reduced to 1 with $M = 4$ .
|
| 158 |
+
|
| 159 |
+
indicating that the current parameter is $\pmb{\theta}_l^{U_{\lfloor t / M \rfloor}}$ while the parameter used to calculate the gradient $\pmb{g}_{\pmb{\theta}_l}^{t - d}$ is $\pmb{\theta}_l^{U_{\lfloor (t - d) / M \rfloor}}$.
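As a quick numerical illustration of Eq. (14) (our own sketch, not from the paper), the LoS follows directly from the batch index $t$, the delay $d$, and the accumulation step $M$:

```python
def level_of_staleness(t: int, d: int, M: int) -> int:
    """LoS per Eq. (14): the current update index floor(t / M) minus the
    update index floor((t - d) / M) of the parameters that produced the
    delayed gradient."""
    return t // M - (t - d) // M

# With a fixed delay of d = 10 batches, accumulating over more batches
# per update lowers the staleness measured in parameter updates.
print(level_of_staleness(t=100, d=10, M=1))   # 10
print(level_of_staleness(t=100, d=10, M=4))   # 3
```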
|
| 160 |
+
|
| 161 |
+
# 4. An Accumulated Decoupled Learning
|
| 162 |
+
|
| 163 |
+
In this section, we show the algorithmic details of the proposed ADL, which include an asynchronous pipelining procedure to achieve model parallelism, and an mGA technique to mitigate the gradient staleness. In particular, we explicitly show how the staleness in each module is reduced.
|
| 164 |
+
|
| 165 |
+
Prior to our development, the network is first partitioned depth-wise into $K$ modules with a stack of layers in each module. That is, we partition the set of layer indices $\{1,\dots ,\mathcal{L}\}$ into $\{q(1),q(2),\ldots ,q(K)\}$ where $q(k) = \{m_k,m_k + 1,\dots,m_{k + 1} - 1\}$ denotes the layer indices in module $k$ . This leads to certain notation changes as follows:
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\begin{array}{l} \boldsymbol {\theta} ^ {t} = \left[ \left(\boldsymbol {\theta} _ {q (1)} ^ {t}\right) ^ {T}, \dots , \left(\boldsymbol {\theta} _ {q (K)} ^ {t}\right) ^ {T} \right] ^ {T}, \boldsymbol {\theta} _ {q (k)} ^ {t} = \left[ \left(\boldsymbol {\theta} _ {m _ {k}} ^ {t}\right) ^ {T}, \dots , \left(\boldsymbol {\theta} _ {m _ {k + 1} - 1} ^ {t}\right) ^ {T} \right] ^ {T} \\ \boldsymbol {g} _ {\boldsymbol {\theta}} ^ {t} = \left[ \left(\boldsymbol {g} _ {\boldsymbol {\theta} _ {q (1)}} ^ {t}\right) ^ {T}, \dots , \left(\boldsymbol {g} _ {\boldsymbol {\theta} _ {q (K)}} ^ {t}\right) ^ {T} \right] ^ {T}, \boldsymbol {g} _ {\boldsymbol {\theta} _ {q (k)}} ^ {t} = \left[ \left(\boldsymbol {g} _ {\boldsymbol {\theta} _ {m _ {k}}} ^ {t}\right) ^ {T}, \dots , \left(\boldsymbol {g} _ {\boldsymbol {\theta} _ {m _ {k + 1} - 1}} ^ {t}\right) ^ {T} \right] ^ {T} \\ \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {t} = [ (\bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {q (1)}} ^ {t}) ^ {T}, \dots , (\bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {q (K)}} ^ {t}) ^ {T} ] ^ {T}, \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {q (k)}} ^ {t} = [ (\bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {m _ {k}}} ^ {t}) ^ {T}, \dots , (\bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {m _ {k + 1 - 1}}} ^ {t}) ^ {T} ] ^ {T}. \\ \end{array}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
We depict the proposed ADL with an example of training a 3-layer network with a PS of $K = 3$ in Fig. 1(a) ( $M = 2$ ) and 1(b) ( $M = 4$ ) respectively. As illustrated in the figures, at every iteration, each module runs a forward pass and a backward pass. The forward pass is executed with a module input that comes from the output of the lower module at the previous instance. The backward pass calculates the gradients by resuming a local BP using gradients inherited
|
| 172 |
+
|
| 173 |
+
from the upper module based on the "older" data batches. Note that all the partitioned modules can run in parallel due to asynchronism as the modules process data from different batches. Next, each module accumulates gradients for $M$ steps before the gradients are applied to update the module weights.
|
| 174 |
+
|
| 175 |
+
# 4.1. Asynchronous Learning without BP Lockings
|
| 176 |
+
|
| 177 |
+
Consider the weights of module $k$ ( $k = 1, \ldots, K$ ) at update index $s$ with $\pmb{\theta}_{q(k)}^{U_s}$ . We detail the learning procedures in module $k$ to conduct update $s + 1$ as follows.
|
| 178 |
+
|
| 179 |
+
# 4.1.1. FORWARD PASS
|
| 180 |
+
|
| 181 |
+
Module $k$ conducts the forward passes using data batches with indexes $U_{s}, U_{s} + 1, \ldots, U_{s} + M - 1$ . Let $j = 0, 1, \ldots, M - 1$ . In detail, we feed the module input $\pmb{z}_{m_k - 1}^{U_s + j}$ received from module $k - 1$ to generate activations in each layer, which are obtained w.r.t. the same parameter $\theta_{q(k)}^{U_s}$ . Next, we obtain the activation $\pmb{z}_{m_{k + 1} - 1}^{U_s + j}$ at the end of this module, and send this activation to module $k + 1$ (if any).
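A minimal sketch of this forward hand-off follows (our illustration; the `boundary` buffers, `stash` storage, and module objects are hypothetical). Each boundary keeps a small FIFO so that module $k$ consumes the activation module $k-1$ produced at an earlier iteration:

```python
def forward_step(modules, boundary, stash, x_batch):
    """One pipeline iteration of the forward pass: each module k runs on a
    *different* mini-batch, so no module waits for the others (forward
    unlocking). boundary[k] buffers activations handed over from module k-1;
    stash[k] keeps (input, output) pairs for the delayed local BP."""
    for k, module in enumerate(modules):               # conceptually in parallel
        if k == 0:
            z_in = x_batch                             # fresh data for the first module
        elif boundary[k]:
            z_in = boundary[k].pop(0)                  # activation from an earlier batch
        else:
            continue                                   # pipeline not yet filled here
        z_in = z_in.detach().requires_grad_(True)      # sever the global graph
        z_out = module(z_in)                           # forward w.r.t. theta_{q(k)}^{U_s}
        stash[k].append((z_in, z_out))                 # needed later for Eq. (15)
        if k + 1 < len(modules):
            boundary[k + 1].append(z_out)              # hand off to module k+1
```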
|
| 182 |
+
|
| 183 |
+
# 4.1.2. BACKWARD PASS
|
| 184 |
+
|
| 185 |
+
During the backward pass, module $k$ resumes BP locally using the gradient $\pmb{g}_{z_{m_{k + 1} - 1}}^{U_s + j - 2(K - k)}$ received from module $k + 1$ . Note that the superscript $U_{s} + j - 2(K - k)$ indicates
|
| 186 |
+
|
| 187 |
+
that there are $2(K - k)$ steps of batch index delay w.r.t. the forward pass (see Fig. 1 for illustration). Accordingly, we calculate the gradients at each layer $(m_k \leq l \leq m_{k+1} - 1)$ within this module as follows:
|
| 188 |
+
|
| 189 |
+
$$
|
| 190 |
+
\hat {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j} = \frac {\partial \boldsymbol {z} _ {m _ {k + 1} - 1} ^ {U _ {s} + j - 2 (K - k)}}{\partial \boldsymbol {\theta} _ {l} ^ {U _ {\lfloor (U _ {s} + j - 2 (K - k)) / M \rfloor}}} \boldsymbol {g} _ {\boldsymbol {z} _ {m _ {k + 1} - 1}} ^ {U _ {s} + j - 2 (K - k)}. \tag {15}
|
| 191 |
+
$$
|
| 192 |
+
|
| 193 |
+
Note that Eq. (15) is obtained w.r.t. $\theta_{l}^{U_{\lfloor (U_s + j - 2(K - k)) / M\rfloor}}$ with update index $\lfloor (U_s + j - 2(K - k)) / M\rfloor$ instead of $s$. This is because the gradient is calculated based on the "older" data batches, whose corresponding update indexes follow from Eq. (10). At the end of the local BP, gradient $g_{z_{m_k - 1}}^{U_s + j - 2(K - k)}$ w.r.t. the module input $z_{m_k - 1}^{U_s + j - 2(K - k)}$ is generated, which is then sent to module $k - 1$ (if any).
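The local backward pass of Eq. (15) can be sketched as follows (our illustration, reusing the hypothetical `stash` buffers above): module $k$ pops its oldest stored pair, injects the delayed gradient received from module $k+1$, and resumes BP inside the module only, while the parameter `.grad` fields keep accumulating for the mGA:

```python
import torch

def backward_step(k, stash, grad_from_upper):
    """Resume BP locally in module k (Eq. (15)): backpropagate the delayed
    gradient g_{z_{m_{k+1}-1}} received from module k+1 through the stored
    sub-graph. Parameter .grad buffers accumulate across calls (mGA), and
    the gradient w.r.t. the module input is returned for module k-1."""
    z_in, z_out = stash[k].pop(0)   # oldest pair, 2(K - k) batches behind
    torch.autograd.backward(z_out, grad_tensors=grad_from_upper)
    return z_in.grad                # g_{z_{m_k - 1}}, to be sent to module k-1
```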
|
| 194 |
+
|
| 195 |
+
# 4.2. Update with Module-wise Gradient Accumulation
|
| 196 |
+
|
| 197 |
+
After obtaining the gradients using Eq. (15), the module is not updated immediately. Instead, we apply the mGA by accumulating these gradients for $M$ steps before they are used to update the corresponding module as follows:
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\boldsymbol {\theta} _ {l} ^ {U _ {s + 1}} = \boldsymbol {\theta} _ {l} ^ {U _ {s}} - \gamma_ {s} (1 / M) \sum_ {j = 0} ^ {M - 1} \hat {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j}. \tag {16}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
Note that all the modules must consistently accumulate gradients calculated w.r.t. the same group of data batches, though the updates happen asynchronously. For instance, with an mGA step of 2, if module 1 decides to accumulate batches 10 and 11, the other modules must accumulate these two batches, instead of other possible combinations such as 9 and 10, or 11 and 12. This module-wise coordination differentiates the mGA from the gGA in synchronized learning. We summarize the proposed ADL in Algorithm 1.
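A minimal sketch of this accumulation-and-update step, i.e., Eq. (16), is given below (our illustration; `opt_k` is an assumed per-module optimizer whose `.grad` buffers were filled by the local backward passes above):

```python
def maybe_update_module(t, M, opt_k):
    """Apply Eq. (16): once module k has accumulated gradients for the M
    batches U_s, ..., U_s + M - 1, scale by 1/M and take one step. Every
    module must accumulate over the same group of M batches, even though
    the modules reach this point at different wall-clock times."""
    if (t + 1) % M != 0:            # still accumulating for update index s = t // M
        return
    for p in opt_k.param_groups[0]["params"]:
        if p.grad is not None:
            p.grad.div_(M)          # the 1/M averaging factor in Eq. (16)
    opt_k.step()                    # produces theta_{q(k)}^{U_{s+1}}
    opt_k.zero_grad()
```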
|
| 204 |
+
|
| 205 |
+
Note that the above ADL is a lock-free decoupled technique. Firstly, the global BP is cast into local BPs in each module running in parallel, which removes the backward locking. Secondly, the split modules adopt training data from different batches so that the forward passes can be executed without waiting for the data from the lower layers. This tackles the forward locking. Finally, each module is updated immediately without waiting for other modules to complete their forward passes, hence addressing the update locking.
|
| 206 |
+
|
| 207 |
+
# 4.3. Impact of Module-wise Gradient Accumulation
|
| 208 |
+
|
| 209 |
+
Indicated by Eq. (15), the gradients are obtained based on $\theta^{U_{\lfloor (U_s + j - 2(K - k)) / M\rfloor}}$ while the parameter state is $\theta^{U_s}$ . Therefore, according to Eq. (14), the LoS for module $k$ is shown as follows ( $j = 0,1,\ldots ,M - 1$ ):
|
| 210 |
+
|
| 211 |
+
$$
|
| 212 |
+
d _ {k, j} = s - \left\lfloor \left(U _ {s} + j - 2 (K - k)\right) / M \right\rfloor . \tag {17}
|
| 213 |
+
$$
|
| 214 |
+
|
| 215 |
+
# Algorithm 1 The proposed ADL
|
| 216 |
+
|
| 217 |
+
Partition the network into $K$ modules, and set mGA step of $M$ ; for each iteration do
|
| 218 |
+
|
| 219 |
+
# for $k\gets 1$ to $K$ (Parallel) do
|
| 220 |
+
|
| 221 |
+
Forward pass: generate the activations with module input (e.g., $z_{m_k - 1}^{U_s + j}$ ), and send the module output (e.g., $z_{m_{k + 1} - 1}^{U_s + j}$ ) to module $k + 1$ (if any);
|
| 222 |
+
|
| 223 |
+
Backward pass: using gradient (e.g., $g_{z_{m_{k+1}-1}}^{U_s + j - 2(K - k)}$ ) received from module $k + 1$ to calculate the gradients in each layer following Eq. (15), and send the gradient w.r.t. the module input (e.g., $g_{z_{m_k-1}}^{U_s + j - 2(K - k)}$ ) to module $k - 1$ (if any);
|
| 224 |
+
|
| 225 |
+
Update: if module $k$ accumulates gradients from batches $U_{s}, U_{s} + 1, \ldots, U_{s} + M - 1$ then
|
| 226 |
+
|
| 227 |
+
Update the module using Eq. (16); end
|
| 228 |
+
|
| 229 |
+
end
|
| 230 |
+
|
| 231 |
+
end
|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
Figure 2. The averaged LoS w.r.t. mGA step of $M$ .
|
| 235 |
+
|
| 236 |
+
For instance, as shown in Fig. 1(b), with $M = 4$ module 2 updates its parameters using gradients with staleness $d_{k,j}$ ( $1 \leq k \leq K, 0 \leq j < M$ ) of $d_{2,0} = 1$ , $d_{2,1} = 1$ , $d_{2,2} = 0$ , and $d_{2,3} = 0$ . Eq. (17) also indicates the staleness range:
|
| 237 |
+
|
| 238 |
+
$$
|
| 239 |
+
0 \leq d _ {k, j} \leq 2 (K - k) \tag {18}
|
| 240 |
+
$$
|
| 241 |
+
|
| 242 |
+
with the minimum $d_{k,j}$ reached for $j - 2(K - k) > 0$ and the maximum $d_{k,j} = 2(K - k)$ obtained at $M = 1$ indicating no mGA involved. For convenience, we adopt the averaged LoS:
|
| 243 |
+
|
| 244 |
+
$$
|
| 245 |
+
\bar {d} _ {k} = (1 / M) \sum_ {j = 0} ^ {M - 1} d _ {k, j} \tag {19}
|
| 246 |
+
$$
|
| 247 |
+
|
| 248 |
+
to evaluate the staleness in module $k$ . As an example, Fig. 2 shows the averaged LoS in module 1 and 6 w.r.t. the accumulation step $M$ with $K = 8$ , where the gradient staleness is shown to reduce with increasing $M$ .
|
| 249 |
+
|
| 250 |
+
A large $M$ leads to low gradient staleness, but it does not necessarily guarantee improvement in network generalization. This is because a larger $M$ also indicates that the overall mini-batch size is large, which could weaken the network's ability to generalize (Keskar et al., 2016). As a result, $M$ is an additional hyperparameter that handles the delicate balance between gradient staleness and generalization. In
|
| 251 |
+
|
| 252 |
+
fact, we do not need a large $M$ to help the optimization because a small $M$ has a rather significant impact on staleness reduction. As shown in Fig. 2, with $M = 4$ the averaged LoS has already been reduced by $75\%$ (e.g., from 16 to 4).
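The averaged LoS of Eq. (19) can be reproduced numerically with a few lines (a sketch of Eqs. (17)-(19); the exact values depend on $K$, $k$, and $M$):

```python
def averaged_los(K: int, k: int, M: int, s: int = 100) -> float:
    """Average the per-gradient staleness d_{k,j} of Eq. (17) over
    j = 0, ..., M-1, following Eq. (19)."""
    U_s = M * s
    d = [s - (U_s + j - 2 * (K - k)) // M for j in range(M)]
    return sum(d) / M

# For K = 8 and module k = 1, increasing M sharply reduces the averaged
# staleness, matching the roughly 75% reduction discussed above for M = 4.
for M in (1, 2, 4, 8):
    print(M, averaged_los(K=8, k=1, M=M))   # 14.0, 7.0, 3.5, 1.75
```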
|
| 253 |
+
|
| 254 |
+
In addition, using Eq. (17), we can unpack Eq. (15):
|
| 255 |
+
|
| 256 |
+
$$
|
| 257 |
+
\begin{array}{l} \hat {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j} = \frac {\partial \boldsymbol {z} _ {m _ {k + 1} - 1} ^ {U _ {s} + j - 2 (K - k)}}{\partial \boldsymbol {\theta} _ {l} ^ {U _ {s - d _ {k, j}}}} \frac {\partial f _ {\boldsymbol {x} _ {U _ {s} + j - 2 (K - k)}} (\boldsymbol {\theta} ^ {U _ {s - d _ {k, j}}})}{\partial \boldsymbol {z} _ {m _ {k + 1} - 1} ^ {U _ {s} + j - 2 (K - k)}} \\ = \frac {\partial f _ {\boldsymbol {x} _ {U _ {s} + j - 2 (K - k)}} (\boldsymbol {\theta} ^ {U _ {s - d _ {k, j}}})}{\partial \boldsymbol {\theta} _ {l} ^ {U _ {s - d _ {k, j}}}} = \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j - 2 (K - k)} \tag {20} \end{array}
|
| 258 |
+
$$
|
| 259 |
+
|
| 260 |
+
and then rewrites Eq. (16) as
|
| 261 |
+
|
| 262 |
+
$$
|
| 263 |
+
\boldsymbol {\theta} _ {l} ^ {U _ {s + 1}} = \boldsymbol {\theta} _ {l} ^ {U _ {s}} - \gamma_ {s} (1 / M) \sum_ {j = 0} ^ {M - 1} \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s} + j - 2 (K - k)}. \tag {21}
|
| 264 |
+
$$
|
| 265 |
+
|
| 266 |
+
That is, the proposed ADL accumulates gradients that are $2(K - k)$ steps "older", while each of these accumulated gradients admits an LoS of $d_{k,j}$ as shown in Eq. (17).
|
| 267 |
+
|
| 268 |
+
# 5. Convergence Analysis
|
| 269 |
+
|
| 270 |
+
In this section, we conduct convergence analysis for the proposed method. The analysis shows that the ADL can converge to critical points based on the following assumptions.
|
| 271 |
+
|
| 272 |
+
Assumption 1. Lipschitz continuity of gradients for loss functions $f(\pmb{\theta})$ , which means $\exists L \in \mathbb{R}^{+}$ such that:
|
| 273 |
+
|
| 274 |
+
$$
|
| 275 |
+
\left| \left| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {\alpha}} - \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {\beta}} \right| \right| _ {2} \leq L \left| \left| \boldsymbol {\theta} _ {l} ^ {U _ {\alpha}} - \boldsymbol {\theta} _ {l} ^ {U _ {\beta}} \right| \right| _ {2} \tag {22}
|
| 276 |
+
$$
|
| 277 |
+
|
| 278 |
+
where $\| .\| _2$ is an $l_{2}$ -norm operator. This also leads to
|
| 279 |
+
|
| 280 |
+
$$
|
| 281 |
+
\left| \left| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {q (k)}} ^ {U _ {\alpha}} - \bar {\boldsymbol {g}} _ {\boldsymbol {\theta} _ {q (k)}} ^ {U _ {\beta}} \right| \right| _ {2} \leq L \left| \left| \boldsymbol {\theta} _ {q (k)} ^ {U _ {\alpha}} - \boldsymbol {\theta} _ {q (k)} ^ {U _ {\beta}} \right| \right| _ {2}, \left| \left| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {\alpha}} - \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {\beta}} \right| \right| _ {2} \leq L \left| \left| \boldsymbol {\theta} ^ {U _ {\alpha}} - \boldsymbol {\theta} ^ {U _ {\beta}} \right| \right| _ {2}.
|
| 282 |
+
$$
|
| 283 |
+
|
| 284 |
+
Assumption 2. Bounded variance of the stochastic gradient, which means that $\forall s,\exists A > 0$ such that:
|
| 285 |
+
|
| 286 |
+
$$
|
| 287 |
+
\| \boldsymbol {g} _ {\boldsymbol {\theta} _ {l}} ^ {U _ {s}} \| _ {2} ^ {2} \leq A, \ \text {which leads to} \ \| \boldsymbol {g} _ {\boldsymbol {\theta} _ {q (k)}} ^ {U _ {s}} \| _ {2} ^ {2} \leq A, \ \| \boldsymbol {g} _ {\boldsymbol {\theta}} ^ {U _ {s}} \| _ {2} ^ {2} \leq A. \tag {23}
|
| 288 |
+
$$
|
| 289 |
+
|
| 290 |
+
Assumptions 1 and 2 are commonly adopted in convergence analysis of neural networks (see (Bottou et al., 2018; Huo et al., 2018b)). In particular, these assumptions do not assume convexity of function $f$ .
|
| 291 |
+
|
| 292 |
+
Theorem 1. Let Assumptions 1 and 2 hold. Suppose that the learning rate is non-increasing and $L\gamma_{s} \leq 1$ . The proposed ADL then satisfies the following bound:
|
| 293 |
+
|
| 294 |
+
$$
|
| 295 |
+
\mathbb {E} _ {\boldsymbol {x}} \left\{f \left(\boldsymbol {\theta} ^ {U _ {s + 1}}\right) \right\} - f \left(\boldsymbol {\theta} ^ {U _ {s}}\right) \leq - \frac {\gamma_ {s}}{2} \| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {s}} \| _ {2} ^ {2} + \gamma_ {s} ^ {2} A L (1 + (1 / M) \sum_ {k = 1} ^ {K} \bar {d} _ {k}) / M. \tag {24}
|
| 296 |
+
$$
|
| 297 |
+
|
| 298 |
+
Proof. See supplementary material A.
|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
|
| 302 |
+
Theorem 1 gives an important indication for convergence. If the RHS of Eq. (24) is negative, i.e.,
|
| 303 |
+
|
| 304 |
+
$$
|
| 305 |
+
\gamma_ {s} < \min \left\{1 / L, M \| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {s}} \| _ {2} ^ {2} / (2 A L (1 + (1 / M) \sum_ {k = 1} ^ {K} \bar {d} _ {k})) \right\},
|
| 306 |
+
$$
|
| 307 |
+
|
| 308 |
+
the expected loss $\mathbb{E}_{\pmb{x}}\{f(\pmb{\theta}^{U_{s + 1}})\}$ decreases. We further give convergence evidence in the following theorems; their detailed proofs can be found in the supplementary material.
|
| 309 |
+
|
| 310 |
+
Theorem 2. Suppose Assumptions 1 and 2 hold, and the learning rate is non-increasing as well as satisfies $L\gamma_{s} \leq 1$ . Let $\theta^{*}$ be the global minimizer and $\mathbb{T}_S = \sum_{s=0}^{S-1}\gamma_s$ where $S$ indicates the network will be updated $S$ times. Then
|
| 311 |
+
|
| 312 |
+
$$
|
| 313 |
+
\begin{array}{l} (1 / \mathbb {T} _ {S}) \sum_ {s = 0} ^ {S - 1} \gamma_ {s} \mathbb {E} \left\{\left| \left| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {s}} \right| \right| _ {2} ^ {2} \right\} \leq 2 (f (\boldsymbol {\theta} ^ {0}) - f (\boldsymbol {\theta} ^ {*})) / \mathbb {T} _ {S} \\ + (2 A L (1 + (1 / M) \sum_ {k = 1} ^ {K} \bar {d} _ {k}) \sum_ {s = 0} ^ {S - 1} \gamma_ {s} ^ {2}) / (M \mathbb {T} _ {S}). \tag {25} \\ \end{array}
|
| 314 |
+
$$
|
| 315 |
+
|
| 316 |
+
Proof. See supplementary material B.
|
| 317 |
+
|
| 318 |
+

|
| 319 |
+
|
| 320 |
+
The bound in Theorem 2 indicates that, for a $\mathfrak{q}$ randomly selected from $\{0,1,\dots ,S - 1\}$ with probability $\gamma_{\mathfrak{q}} / \mathbb{T}_S$, $\mathbb{E}\{||\bar{\pmb{g}}_{\pmb{\theta}}^{U_{\mathfrak{q}}}||_2^2\}$ is bounded by the RHS of Eq. (25). More importantly, a larger $M$ leads to a smaller bound in Eq. (25) because $\bar{d}_k$ decreases, which benefits the convergence. Another observation is that a larger PS hinders the convergence as $\sum_{k = 1}^{K}\bar{d}_k$ increases. These observations are consistent with our understanding that the mGA helps the optimization by mitigating staleness, whereas partitioning the network into more modules is harmful.
|
| 321 |
+
|
| 322 |
+
Corollary 1. If $\gamma_{s}$ further satisfies $\lim_{S\to \infty}\mathbb{T}_S = \infty$ and $\lim_{S\to \infty}\sum_{s = 0}^{S - 1}\gamma_s^2 < \infty$ , the RHS of Eq. (25) converges to 0.
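As a standard example (ours, not taken from the paper), a harmonically decaying schedule satisfies both conditions in Corollary 1:

$$
\gamma_ {s} = \frac {\gamma_ {0}}{s + 1}, \qquad \sum_ {s = 0} ^ {\infty} \gamma_ {s} = \infty , \qquad \sum_ {s = 0} ^ {\infty} \gamma_ {s} ^ {2} = \gamma_ {0} ^ {2} \sum_ {s = 0} ^ {\infty} \frac {1}{(s + 1) ^ {2}} = \frac {\pi ^ {2}}{6} \gamma_ {0} ^ {2} < \infty .
$$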
|
| 323 |
+
|
| 324 |
+
According to Corollary 1, by properly scheduling the learning rate, the bound on the expected gradient norm converges to 0, i.e., $\lim_{S\to \infty}\mathbb{E}\{||\overline{\pmb{g}}_ {\theta}^{U_s}||_2^2\} = 0$. That is, the proposed ADL can converge to critical points. Alternatively, the convergence can be revealed by setting a constant learning rate, as indicated in the following theorem.
|
| 325 |
+
|
| 326 |
+
Theorem 3. Let Assumptions 1 and 2 hold. Suppose the learning rate is set as a constant:
|
| 327 |
+
|
| 328 |
+
$$
|
| 329 |
+
\gamma = \epsilon \sqrt {M (f (\boldsymbol {\theta} ^ {0}) - f (\boldsymbol {\theta} ^ {*})) / \left(S A L \left(1 + \sum_ {k = 1} ^ {K} \bar {d} _ {k}\right)\right)}
|
| 330 |
+
$$
|
| 331 |
+
|
| 332 |
+
where $\epsilon$ is a scaling factor such that $L\gamma \leq 1$ . Let $\theta^{*}$ be the global minimizer. Then we have
|
| 333 |
+
|
| 334 |
+
$$
|
| 335 |
+
\min _ {s \in \{0, 1, \dots , S - 1 \}} \mathbb {E} \left\{\| \bar {\boldsymbol {g}} _ {\boldsymbol {\theta}} ^ {U _ {*}} \| _ {2} ^ {2} \right\} \leq \frac {(2 + 2 \epsilon^ {2})}{\epsilon} \sqrt {A L (f (\boldsymbol {\theta} ^ {0}) - f (\boldsymbol {\theta} ^ {*})) (1 + (1 / M) \sum_ {k = 1} ^ {K} \bar {d} _ {k}) / (M S)}, \tag {26}
|
| 336 |
+
$$
|
| 337 |
+
|
| 338 |
+
where the bound converges to $0$ as $S \to \infty$.
|
| 339 |
+
|
| 340 |
+
Proof. See supplementary material C
|
| 341 |
+
|
| 342 |
+

|
| 343 |
+
|
| 344 |
+
In summary, although the ADL attains model parallelism and introduces asynchronism, we show that our method can converge to critical points as well as revealing how it can be affected by the mGA and the PS.
|
| 345 |
+
|
| 346 |
+
Table 2. Validation errors $(\%)$ for (a) the ADL training RN-56 on CIFAR-10, and for the compared methods training various networks on (b) CIFAR-10 and (c) ImageNet. $\otimes$ indicates divergence.
|
| 347 |
+
|
| 348 |
+
<table><tr><td rowspan="8">(a)</td><td>M → 1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td><td>11</td><td>12</td></tr><tr><td>RN-56 (K=16)</td><td>⊗</td><td>⊗</td><td>⊗</td><td>6.38</td><td>6.90</td><td>6.92</td><td>6.79</td><td>6.49</td><td>6.78</td><td>6.44</td><td>6.65</td></tr><tr><td colspan="2">Architecture</td><td>BP</td><td>DDG</td><td>DGL</td><td>FR</td><td colspan="2">DSP*</td><td colspan="2">GPipe‡</td><td colspan="2">ADL</td></tr><tr><td colspan="2">RN-56 (K=2)</td><td>6.19</td><td>6.63</td><td>6.77</td><td>6.07</td><td colspan="2">-</td><td colspan="2">6.04</td><td colspan="2">5.99</td></tr><tr><td colspan="2">RN-56 (K=3)</td><td>6.19</td><td>6.50</td><td>8.88</td><td>6.33</td><td colspan="2">-</td><td colspan="2">5.94</td><td colspan="2">6.09</td></tr><tr><td colspan="2">RN-56 (K=4)</td><td>6.19</td><td>6.61</td><td>9.65</td><td>6.48</td><td colspan="2">-</td><td colspan="2">6.03</td><td colspan="2">6.16</td></tr><tr><td colspan="2">RN-56 (K=8)</td><td>6.19</td><td>⊗</td><td>13.26</td><td>6.64</td><td colspan="2">-</td><td colspan="2">6.08</td><td colspan="2">6.18</td></tr><tr><td colspan="2">RN-56 (K=16)</td><td>6.19</td><td>⊗</td><td>13.36</td><td>11.51</td><td colspan="2">-</td><td colspan="2">6.23</td><td colspan="2">6.38</td></tr><tr><td rowspan="7">(b)</td><td colspan="2">RN-110 (K=2)</td><td>5.79</td><td>6.26</td><td>6.26</td><td>5.76</td><td colspan="2">-</td><td colspan="2">5.70</td><td colspan="2">5.87</td></tr><tr><td colspan="2">RN-110 (K=8)</td><td>5.79</td><td>⊗</td><td>11.96</td><td>6.56</td><td colspan="2">-</td><td colspan="2">5.69</td><td colspan="2">5.80</td></tr><tr><td colspan="2">RN-98 (K=4)</td><td>6.01</td><td>6.19</td><td>9.7</td><td>6.01</td><td colspan="2">6.59</td><td colspan="2">6.00</td><td colspan="2">5.92</td></tr><tr><td colspan="2">RN-98 (K=8)</td><td>6.01</td><td>⊗</td><td>12.37</td><td>6.35</td><td colspan="2">-</td><td colspan="2">5.99</td><td colspan="2">5.93</td></tr><tr><td colspan="2">RN-34 (K=12)</td><td>4.50</td><td>⊗</td><td>9.56</td><td>⊗</td><td colspan="2">-</td><td colspan="2">4.65</td><td colspan="2">4.63</td></tr><tr><td colspan="2">RN-164 (K=4)</td><td>5.36</td><td>5.43</td><td>8.77</td><td>5.60</td><td colspan="2">5.58</td><td colspan="2">5.52</td><td colspan="2">5.46</td></tr><tr><td colspan="2">RN-164 (K=10)</td><td>5.36</td><td>5.58</td><td>10.70</td><td>5.89</td><td colspan="2">-</td><td colspan="2">5.39</td><td colspan="2">5.52</td></tr></table>
|
| 349 |
+
|
| 350 |
+
<table><tr><td>Architecture</td><td>BP</td><td>ADL</td></tr><tr><td>RN-18 (K=3)</td><td>29.79/10.92</td><td>29.52/10.42</td></tr><tr><td>RN-18 (K=4)</td><td>29.79/10.92</td><td>29.64/10.56</td></tr><tr><td>RN-18 (K=8)</td><td>29.79/10.92</td><td>29.75/10.55</td></tr><tr><td>RN-18 (K=10, max.)†</td><td>29.79/10.92</td><td>29.84/10.76</td></tr><tr><td>RN-50 (K=4)</td><td>23.65/7.13</td><td>23.43/7.45</td></tr><tr><td>SE-RN-18 (K=8)</td><td>29.09/9.89</td><td>29.01/10.14</td></tr><tr><td>SE-RN-18 (K=10, max.)†</td><td>29.09/9.89</td><td>29.07/10.32</td></tr></table>
|
| 351 |
+
|
| 352 |
+
* We only provide results from DSP's paper due to no available source code.
|
| 353 |
+
The GPipe is included as a synchronous baseline.
|
| 354 |
+
The largest PS with each module being one layer or one residual block.
|
| 355 |
+
|
| 356 |
+
# 6. Experiments
|
| 357 |
+
|
| 358 |
+
For validation we conduct learning of classification tasks on the CIFAR-10 (Krizhevsky & Hinton, 2009) and ImageNet 2012 (Russakovsky et al., 2015) datasets. We mainly focus on examining the generalization of networks trained by ADL in order to obtain empirical evidence of gradient staleness mitigation in decoupled learning. We then touch lightly on its acceleration performance and point out an imbalance issue affecting the acceleration, which is currently unresolved in the area of decoupled learning. Finally, we present the memory consumption. We compare our method with several state-of-the-arts, including DDG (Huo et al., 2018b), FR (Huo et al., 2018a), DGL (Belilovsky et al., 2020), DSP (Xu et al., 2020), and BP (Werbos, 1974). In particular, we include the GPipe (Huang et al., 2019) as a baseline for synchronous model parallelism. We focus on the comparison among methods of decoupled learning, and exclude the ASGD- and pipeline-based methods though they also involve gradient staleness.
|
| 359 |
+
|
| 360 |
+
Implementation details: The experiments are performed with PyTorch (Paszke et al., 2019) where we pre-process the datasets using standard data augmentation (i.e., random cropping, random horizontal flip and normalizing). We adopt the same training strategy for all the compared methods (except DSP due to no source code) for fairness. The SGD optimizer with a momentum of 0.9 is adopted, and gradual warm-up in (Goyal et al., 2017) for 3 epochs is
|
| 361 |
+
|
| 362 |
+
used. We adopt an overall batch size of 128 for all methods. Specifically, since the mGA accumulates the data batches, for ADL the batch size in each iteration is set to $\lfloor 128 / M\rfloor$ . The GPipe adopts a similar setting with an overall batch size of 128 scattered into $M$ micro-batches. For CIFAR-10, the weight decay is set to $5\times 10^{-4}$ with an initial learning rate of 0.1, and the networks are trained for 300 epochs with the learning rate divided by 10 at 150, 225 and 275 epochs. For ImageNet, a $224\times 224$ crop is randomly sampled, and the weight decay is set to $1\times 10^{-4}$ with an initial learning rate of 0.05. We train the networks for 90 epochs, and divide the learning rate by 10 at 30, 60, and 80 epochs. Finally, the validation results are reported as the median of 3 runs.
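For reference, the recipe above can be captured in a small configuration sketch (a hypothetical dictionary mirroring the stated CIFAR-10 hyperparameters, not released code):

```python
# Hypothetical config mirroring the CIFAR-10 training recipe described above.
cifar10_recipe = {
    "optimizer": "SGD",
    "momentum": 0.9,
    "overall_batch_size": 128,
    "mga_steps_M": 4,
    "per_iteration_batch": 128 // 4,     # floor(128 / M) = 32 samples per iteration
    "weight_decay": 5e-4,
    "initial_lr": 0.1,
    "epochs": 300,
    "lr_decay_epochs": [150, 225, 275],  # divide the learning rate by 10 here
    "warmup_epochs": 3,                  # gradual warm-up (Goyal et al., 2017)
}
```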
|
| 363 |
+
|
| 364 |
+
Datasets: The CIFAR-10 dataset includes 32x32 color images with 10 classes, and has 50000 and 10000 samples for training and validation respectively. The ImageNet dataset contains 1000 classes, with 1.28 million and 50000 images of various sizes for training and validation.
|
| 365 |
+
|
| 366 |
+
# 6.1. Generalization Performance
|
| 367 |
+
|
| 368 |
+
Firstly, we evaluate the impact of mGA by training RN-56 $(K = 16)$ on CIFAR-10 with various $M$ . As shown in Table 2(a), the ADL diverges with $M \leq 3$ due to strong gradient staleness. Once the learning converges, the ADL performs rather robustly across the remaining values of $M$ . For convenience, we pick $M = 4$ (i.e., reducing the staleness by $75\%$ as indicated by Eq. (19)) for the ADL in the following experiments, which should stabilize the decoupled learning for $K \leq 16$ .
|
| 369 |
+
|
| 370 |
+
Next, we train various RN architectures on CIFAR-10 with PS ranging from $K = 2$ to $K = 16$. The classification results are shown in Table 2(b). In general, our ADL delivers results comparable to or better than those of the compared methods, including the synchronous ones (i.e., BP and GPipe), whereas the other decoupled learning methods encounter learning issues as $K$ increases. GPipe performs as well as the BP baselines without any visible impact from increasing PS, which is expected since its synchronous nature introduces no gradient staleness. DGL suffers a growing loss of performance as the PS increases, e.g., $6.77\% \rightarrow 9.65\% \rightarrow 13.26\%$ for RN-56 with $K = 2, 4, 8$ respectively. DGL severs the gradient flow between adjacent modules, which leaves the lower modules under-trained without any feedback from the upper modules and leads to inferior results compared with the BP baselines.

FR gives promising results for small PS (e.g., $K = 2$), which is consistent with the claim made in (Huo et al., 2018a). However, its performance deteriorates significantly when training with a very large PS. For instance, training RN-56 with $K = 16$ leads to an error rate of $11.51\%$, which is significantly worse than the $6.07\%$ obtained with $K = 2$. In the case of RN-34 ($K = 12$), FR even fails to converge.



Figure 3. Learning curves of compared methods for training RN-34 with $K = 12$ in sub-figures (a) and (c), and RN-56 with $K = 16$ in sub-figures (b) and (d), as well as the error rates w.r.t. PS on RN-18 in sub-figure (e). Note that DSP is excluded as no source code is available.



The performance drop or divergence likely results from the recomputation unit in FR, which adopts inconsistent weights during the forward and backward passes. DSP gives moderate performance for training RN-98. We cannot provide the other corresponding results, as no source code is released and it is difficult to reproduce the algorithm from the manuscript alone, but we would expect DSP to behave similarly to FR as they share the same recomputation technique.

The DDG, on the other hand, is more prone to divergence. As shown in Table 2(b), training RN-56, RN-98 and RN-110 diverges in several cases (mainly with large PS, e.g., $K \geq 8$), while our method successfully trains these networks with performance comparable to the BP baselines. However, when the DDG does converge (e.g., training RN-164 with $K = 10$), unlike FR or DGL whose performance drops sharply, it tends to deliver results very close to those of BP (e.g., $5.58\%$ versus $5.36\%$). This observation reveals that increasing staleness invites instability yet affects generalization only to a limited extent, which empirically supports our motivation for developing the proposed ADL to mitigate gradient staleness and thereby avoid divergence.

We provide the learning curves of RN-34 ($K = 12$) and RN-56 ($K = 16$) in Figure 3. The DDG diverges in both cases, at epochs 48 and 2 for RN-34 and RN-56 respectively. FR diverges at epoch 2 for RN-34, and experiences a "bumpy" convergence for RN-56. Our ADL converges smoothly in both cases while achieving validation results on par with the BP baselines. We also observe a clear pattern in these learning curves: the ADL converges slightly more slowly than BP at the beginning of training (see the zoomed plots in Figure 3). This is consistent with our theoretical findings (e.g., Theorem 2), which suggest that decoupled learning slows down convergence, especially with a large PS. We also examine the impact of PS on the error rate by training RN-18 with various $K$. Figure 3(e) shows that our method is relatively robust to the change of $K$ once the staleness is mitigated, while the compared methods suffer a much more rapid rise in error as $K$ increases.

We also provide the results of ADL trained on ImageNet in comparison with BP. We train several architectures from ResNet and SENet (Hu et al., 2018), and report both Top-1 and Top-5 error rates. As shown in Table 2(c), the ADL generally outperforms its BP counterpart even for a large PS of $K = 10$. This can be explained as follows. The ADL adopts DGs, which can be treated as the real gradients obtained by BP, contaminated with noise drawn from an unknown distribution. Although the contaminated gradients can slow down convergence (e.g., see the zoomed plots in Figure 3), their uncertainty can improve the network's generalization (see (Neelakantan et al., 2015)). The mGA restrains the staleness so that it does not go out of bound while keeping this uncertainty, thereby leading to more promising results in certain cases.

# 6.2. Acceleration Performance
Here we show the acceleration performance of the ADL in comparison with other decoupled learning methods by training RN-101 on ImageNet with various $K$. The experiments are conducted on a server with Tesla V100 GPUs, with each module running on a separate GPU worker. The batch size is adjusted to maximize the training speed, and the network split locations are tuned to distribute the computational workload (i.e., the running time of each iteration) as evenly as possible across the GPUs. Note that the mGA has little effect on the running speed, so we set $M = 4$ as usual.

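As a rough illustration of this setup (our own sketch, not the released implementation), the depth-wise partition amounts to placing $K$ consecutive groups of layers on separate GPU workers. The split points below are a naive even split by layer count, whereas the experiments tune them to balance per-iteration running time.

```python
import torch
import torch.nn as nn

def split_into_modules(layers, K):
    # Partition a list of layers into K consecutive modules, one per GPU worker.
    # This is an even split by layer count; in practice the split locations are
    # tuned so that the per-iteration running time is balanced across workers.
    chunk = (len(layers) + K - 1) // K
    modules = []
    for k in range(K):
        part = nn.Sequential(*layers[k * chunk:(k + 1) * chunk])
        device = f"cuda:{k}" if torch.cuda.device_count() > k else "cpu"
        modules.append(part.to(device))
    return modules

# Toy usage: split a small stack of blocks across (up to) 4 workers.
blocks = [nn.Sequential(nn.Conv2d(16, 16, 3, padding=1), nn.ReLU()) for _ in range(12)]
modules = split_into_modules(blocks, K=4)
```
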
As shown in Table 3, by addressing all three lockings of BP, the ADL achieves a remarkable acceleration over BP and is much faster than DDG and FR, which address only the backward locking.

<table><tr><td colspan="8">Table 3. Speedups (over BP) in training RN-101 (ImageNet).</td></tr><tr><td></td><td>BP</td><td>DDG</td><td>FR</td><td>DGL</td><td>DSP</td><td>GPipe</td><td>ADL</td></tr><tr><td>K=2</td><td>1×</td><td>1.32×</td><td>1.19×</td><td>1.82×</td><td>-</td><td>1.41×</td><td>1.92×</td></tr><tr><td>K=3</td><td>1×</td><td>1.57×</td><td>1.29×</td><td>2.61×</td><td>-</td><td>1.87×</td><td>2.69×</td></tr><tr><td>K=4</td><td>1×</td><td>1.68×</td><td>1.45×</td><td>3.39×</td><td>2.70×</td><td>2.23×</td><td>3.32×</td></tr></table>

Table 4. Memory consumption of the compared methods.
<table><tr><td></td><td>BP</td><td>DDG</td><td>FR</td><td>DGL</td><td>GPipe</td><td>ADL</td></tr><tr><td>Mem. (GB)</td><td>1.14</td><td>1.69</td><td>1.46</td><td>1.49</td><td>1.52</td><td>1.84</td></tr></table>
For instance, for $K = 2$ the ADL accelerates learning with a speedup of $1.92\times$, while DDG and FR only obtain $1.32\times$ and $1.19\times$ respectively. Although DSP also addresses all three lockings, it is slower than the ADL because its recomputation unit demands extra computation. DGL delivers acceleration comparable to that of the ADL, but with weaker generalization (see Table 2(b)). GPipe is slower than our method due to the computational "bubbles" (Huang et al., 2019) resulting from synchronization.

Following DDG, FR and DSP, we only demonstrate the acceleration results for $K \leq 4$ without moving on to $K \geq 5$. This is because the depth-wise partition introduces an imbalance of computational workload among the workers, since the smallest partitionable unit is either one layer or one residual block. The imbalance grows rapidly with larger $K$, leading to inefficiency in decoupled learning. For instance, 2 GPUs accelerate the learning to $1.92\times$, but doubling the resources to 4 GPUs only yields $3.32\times$ rather than a doubled speedup, indicating that certain modules are idle for part of the time. Our ADL obtains the fastest learning speed, but still requires further assistance in balancing the workload distribution if we are to fully realize its acceleration potential (e.g., obtaining a speedup of $\approx 7.5\times$ for $K = 8$). Addressing or mitigating this imbalance issue is nontrivial and is left for future work.

# 6.3. Memory Analysis
We conduct a brief comparison of the memory consumption of the compared methods. The results are measured as the average GPU memory used when training RN-18 with $K = 4$ and a batch size of 128. As indicated in Table 4, all compared methods use more memory than BP. Compared with FR and GPipe, DDG and ADL require additional memory because they perform no recomputation; this, however, allows them, especially the ADL, to run much faster (e.g., $3.32\times$ for ADL vs. $1.45\times$ for FR on 4 GPUs). Reducing the memory usage without affecting the ADL's generalization and speed is a nontrivial task, which we leave for future work.

# 7. Conclusion
In this paper, we proposed the accumulated decoupled learning (ADL) algorithm, which addresses the locking inefficiencies of BP and thereby achieves model parallelism. In particular, it incorporates a module-wise gradient accumulation (mGA) technique that mitigates the gradient staleness hindering the scaling ability of decoupled learning. The effect of this mitigation was demonstrated both theoretically and empirically. Theoretically, we showed that the ADL converges to critical points, and demonstrated how the partition size (PS) and the mGA affect the bound characterizing the convergence. Experiments on classification tasks validated our claims, with the ADL outperforming its state-of-the-art counterparts, especially in the case of a large PS. Finally, we reported the running speed, demonstrating a remarkable acceleration of the ADL as a model parallelism tool.

# Acknowledgment
We thank the anonymous reviewers for their constructive comments, which improved this manuscript. This work was supported in part by the Science and Engineering Research Council, Agency for Science, Technology and Research, Singapore, through the National Robotics Program under Grant 1922500054.

# References
Belilovsky, E., Eickenberg, M., and Oyallon, E. Decoupled greedy learning of CNNs. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pp. 736-745. PMLR, 13-18 Jul 2020.

Bottou, L., Curtis, F. E., and Nocedal, J. Optimization methods for large-scale machine learning. SIAM Review, 60(2):223-311, 2018.

Dean, J., Corrado, G., Monga, R., Chen, K., Devin, M., Mao, M., Senior, A., Tucker, P., Yang, K., Le, Q. V., et al. Large scale distributed deep networks. In Advances in Neural Information Processing Systems, pp. 1223-1231, 2012.

Goyal, P., Dollár, P., Girshick, R., Noordhuis, P., Wesolowski, L., Kyrola, A., Tulloch, A., Jia, Y., and He, K. Accurate, large minibatch SGD: Training ImageNet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.

He, K., Zhang, X., Ren, S., and Sun, J. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.

Hu, J., Shen, L., and Sun, G. Squeeze-and-excitation networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132-7141, 2018.

Huang, Y., Cheng, Y., Bapna, A., Firat, O., Chen, D., Chen, M., Lee, H., Ngiam, J., Le, Q. V., Wu, Y., et al. GPipe: Efficient training of giant neural networks using pipeline parallelism. In Advances in Neural Information Processing Systems, pp. 103-112, 2019.

Huo, Z., Gu, B., and Huang, H. Training neural networks using features replay. In Advances in Neural Information Processing Systems, pp. 6659-6668, 2018a.

Huo, Z., Gu, B., Huang, H., et al. Decoupled parallel backpropagation with convergence guarantee. In International Conference on Machine Learning, pp. 2103-2111, 2018b.

Jaderberg, M., Czarnecki, W., Osindero, S., Vinyals, O., Graves, A., Silver, D., and Kavukcuoglu, K. Decoupled neural interfaces using synthetic gradients. In ICML, 2016.

Keskar, N. S., Mudigere, D., Nocedal, J., Smelyanskiy, M., and Tang, P. T. P. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016.

Krizhevsky, A. and Hinton, G. Learning multiple layers of features from tiny images. Technical report, 2009.

LeCun, Y., Bottou, L., Bengio, Y., Haffner, P., et al. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324, 1998.

Lian, X., Huang, Y., Li, Y., and Liu, J. Asynchronous parallel stochastic gradient for nonconvex optimization. In Advances in Neural Information Processing Systems, pp. 2737-2745, 2015.

Mostafa, H., Ramesh, V., and Cauwenberghs, G. Deep supervised learning using local errors. Frontiers in Neuroscience, 12:608, 2018.

Neelakantan, A., Vilnis, L., Le, Q. V., Sutskever, I., Kaiser, L., Kurach, K., and Martens, J. Adding gradient noise improves learning for very deep networks. arXiv preprint arXiv:1511.06807, 2015.

Nokland, A. and Eidnes, L. H. Training neural networks with local error signals. arXiv preprint arXiv:1901.06656, 2019.

Paszke, A., Gross, S., Massa, F., Lerer, A., Bradbury, J., Chanan, G., Killeen, T., Lin, Z., Gimelshein, N., Antiga, L., Desmaison, A., Kopf, A., Yang, E., DeVito, Z., Raison, M., Tejani, A., Chilamkurthy, S., Steiner, B., Fang, L., Bai, J., and Chintala, S. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pp. 8024-8035. Curran Associates, Inc., 2019.

Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., et al. ImageNet large scale visual recognition challenge. International Journal of Computer Vision, 115(3):211-252, 2015.

Werbos, P. Beyond regression: New tools for prediction and analysis in the behavioral sciences. Ph.D. dissertation, Harvard University, 1974.

Xu, A., Huo, Z., and Huang, H. On the acceleration of deep learning model parallelism with staleness. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 2088-2097, 2020.

Zheng, S., Meng, Q., Wang, T., Chen, W., Yu, N., Ma, Z.-M., and Liu, T.-Y. Asynchronous stochastic gradient descent with delay compensation. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, pp. 4120-4129. JMLR.org, 2017.

Zhuang, H., Wang, Y., Liu, Q., and Lin, Z. Fully decoupled neural network learning using delayed gradients. IEEE Transactions on Neural Networks and Learning Systems, pp. 1-8, 2021. doi: 10.1109/TNNLS.2021.3069883.

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4f2903f6ed485caa67564523dad9253c8f2b5ff23405c5a7ac4c6fece7ec873f
size 569464

accumulateddecoupledlearningwithgradientstalenessmitigationforconvolutionalneuralnetworks/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:683cb6a19201cdc307875af923e6996ef955306dd31bdedc9c2a7b9d9212c47b
size 540594

accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:063e222a2729a91cda6a32dfbff7cae36fcebc54874c93a4a4806717f388cd7a
size 69357

accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e21e042a85c318d5cda85c16fec1528b5f3c0a5a83fe3670c39ffcfe3b863d03
size 85360

accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/e778803f-5d9e-4354-9df2-15268aa14b64_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a69fccdbe02cee8f2e40bcd3ae655805ef9b4f1b67f7a1bbd34207222f94b49a
size 648306

accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/full.md
ADDED
@@ -0,0 +1,326 @@

# Accuracy, Interpretability, and Differential Privacy via Explainable Boosting
|
| 2 |
+
|
| 3 |
+
Harsha Nori Rich Caruana Zhiqi Bu Judy Hanwen Shen Janardhan Kulkarni
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
We show that adding differential privacy to Explainable Boosting Machines (EBMs), a recent method for training interpretable ML models, yields state-of-the-art accuracy while protecting privacy. Our experiments on multiple classification and regression datasets show that DP-EBM models suffer surprisingly little accuracy loss even with strong differential privacy guarantees. In addition to high accuracy, two other benefits of applying DP to EBMs are: a) trained models provide exact global and local interpretability, which is often important in settings where differential privacy is needed; and b) the models can be edited after training without loss of privacy to correct errors which DP noise may have introduced.
|
| 8 |
+
|
| 9 |
+
# 1. Introduction
|
| 10 |
+
|
| 11 |
+
Security researchers have repeatedly shown that machine learning models can leak information about training data (Carlini et al., 2018; Melis et al., 2019). In industries like healthcare, finance and criminal justice, models are trained on sensitive information, and this form of leakage can be especially disastrous. To combat this, researchers have embraced differential privacy, which establishes a strong mathematical standard for privacy guarantees on algorithms (Dwork et al., 2006; 2014). In many of these high-stakes situations, model interpretability is also important to provide audits, help domain experts such as doctors vet the models, and to correct unwanted errors before deployment (Caruana et al., 2015; Rudin, 2019). In this paper, we address both concerns by developing a private algorithm for learning Generalized Additive Models (GAMs) (Hastie & Tibshirani, 1990). We show that this method can provide strong privacy guarantees, high accuracy, and exact global and local interpretability on tabular datasets.
|
| 12 |
+
|
| 13 |
+
<sup>1</sup>Microsoft, Redmond, USA. <sup>2</sup>University of Pennsylvania, Philadelphia, USA. <sup>3</sup>Stanford University, Palo Alto, USA. Correspondence to: Harsha Nori <hanori@microsoft.com>.
|
| 14 |
+
|
| 15 |
+
Proceedings of the $38^{th}$ International Conference on Machine Learning, PMLR 139, 2021. Copyright 2021 by the author(s).
|
| 16 |
+
|
| 17 |
+
While GAMs were traditionally fit using smooth low-order splines (Hastie & Tibshirani, 1990), we focus on Explainable Boosting Machines (EBMs), a modern implementation that learns shape functions using boosted decision trees (Lou et al., 2012; Nori et al., 2019). EBMs are especially interesting because they often match the accuracy of complex blackbox algorithms like XGBoost and random forests, while having a simple optimization procedure and final structure (Chang et al., 2020; Wang et al., 2020).
|
| 18 |
+
|
| 19 |
+
Our main contributions for this paper are:
|
| 20 |
+
|
| 21 |
+
- We introduce DP-EBMs, a differentially private version of EBMs, and provide a rigorous privacy analysis of this algorithm using the recently introduced GDP framework (Dong et al., 2019).
|
| 22 |
+
- Our experimental results on tabular classification and regression problems show that DP-EBMs significantly outperform other DP learning methods. For example, at $\varepsilon = 0.5$ , DP-EBMs have at most a 0.05 loss in AUC compared to non-private EBMs on benchmark datasets.
|
| 23 |
+
- We demonstrate how combining interpretability with differential privacy can address common concerns with DP in practice by enabling users to repair some of the impact of noise on the model and enforce desirable constraints like monotonicity.
|
| 24 |
+
|
| 25 |
+
Before diving into details in the following sections, Figure 1 provides a quick peek at the empirical results. In summary, DP-EBMs outperform other differentially private learning methods on a variety of classification and regression tasks for many reasonable values of $\varepsilon$ .
|
| 26 |
+
|
| 27 |
+
# 2. Preliminaries
|
| 28 |
+
|
| 29 |
+
# 2.1. Explainable Boosting Machines
|
| 30 |
+
|
| 31 |
+
Explainable Boosting Machines belong to the family of Generalized Additive Models (GAMs), which are restricted machine learning models that have the form:
|
| 32 |
+
|
| 33 |
+
$$
g(E[y]) = \beta + f_0(x_0) + f_1(x_1) + \dots + f_k(x_k)
$$

where $\beta$ is an intercept, each $f_{j}$ is a univariate function that operates on a single input feature $x_{j}$ , and $g$ is a link function that adapts the model to different settings like classification and regression (Hastie & Tibshirani, 1990).
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
Figure 1. Comparison of two variants of DP-EBMs with other DP algorithms on four classification datasets. DP-EBMs significantly outperform other methods in every setting. For the full experimental setup and comparisons on regression models, please see Section 4.
|
| 41 |
+
|
| 42 |
+
While GAMs are more flexible than linear models (where each function $f_{j}$ is further restricted to be linear), they are significantly less flexible than most machine learning models due to their inability to learn high-order interactions between features (e.g. $f(x_0, x_1, x_2)$ ). This restricted additive structure has the benefit of allowing GAMs to provide exact interpretability. At prediction time, each feature contributes a score, which are then summed and passed through a link function. These scores show exactly what each feature contributes to the prediction, and can be sorted, compared, and reasoned about (Lundberg & Lee, 2017). In addition, each function $f_{k}$ can be visualized to provide an exact global description of how the model operates across varying inputs.
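To make this additivity concrete, the toy sketch below computes a local explanation: the per-feature scores come from made-up lookup tables standing in for trained shape functions, and the logistic link is assumed for classification. It is an illustration of the idea only, not the EBM package's API.

```python
import numpy as np

# Toy shape functions stored as lookup tables over binned features.
# bin_edges[k] and scores[k] would come from a trained GAM; here they are invented.
bin_edges = {0: np.array([25.0, 40.0, 60.0]), 1: np.array([1.0, 3.0])}
scores    = {0: np.array([-0.8, -0.1, 0.4, 0.9]), 1: np.array([-0.2, 0.0, 0.3])}
intercept = -1.5

def local_explanation(x):
    # Each feature contributes one score; the prediction is their sum
    # (plus the intercept) passed through the logistic link.
    contrib = {k: scores[k][np.digitize(x[k], bin_edges[k])] for k in scores}
    logit = intercept + sum(contrib.values())
    return contrib, 1.0 / (1.0 + np.exp(-logit))

contrib, p = local_explanation({0: 52.0, 1: 2.0})
print(sorted(contrib.items(), key=lambda kv: abs(kv[1]), reverse=True), p)
```
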
EBMs are a recent, popular open-source implementation of boosted-tree GAMs (Nori et al., 2019). We extend the EBM package to include DP-EBMs<sup>1</sup>, which makes DP-EBMs as easy to use as regular EBMs or any scikit-learn model.
|
| 45 |
+
|
| 46 |
+
The EBM training procedure begins by bucketing data from continuous features into discrete bins, ensuring that each bin has approximately equal amounts of data. This preprocessing step is a common optimization in tree-based learning algorithms, and is used by popular packages like LightGBM and XGBoost (Ke et al., 2017; Chen & Guestrin, 2016). The most time consuming part of training a decision tree is finding the best split; discretizing the data before growing trees reduces the search space for splits which can significantly speed up learning with little cost in accuracy.
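This pre-processing step can be illustrated with a small equal-frequency (quantile) binning sketch in NumPy; it is a simplified stand-in for the package's binning, and `max_bins` is an assumed parameter name.

```python
import numpy as np

def quantile_bin(x, max_bins=32):
    # Bucket a continuous feature so that each bin holds roughly equal data.
    # Interior cut points sit at evenly spaced quantiles; duplicates are dropped
    # so heavily repeated values do not create empty bins.
    qs = np.linspace(0, 1, max_bins + 1)[1:-1]
    edges = np.unique(np.quantile(x, qs))
    return np.digitize(x, edges), edges   # bin index per sample, cut points

x = np.random.randn(10_000)
bins, edges = quantile_bin(x, max_bins=32)
print(len(edges) + 1, np.bincount(bins)[:5])  # number of bins, first few counts
```
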
After pre-processing, the goal is to learn shape functions $f_{k}$ for each feature. In traditional boosting, each tree greedily searches the feature space for the next best feature to split. In contrast, EBMs use cyclic gradient boosting to visit each
|
| 49 |
+
|
| 50 |
+

|
| 51 |
+
Figure 2. A single iteration of cyclic boosting, showing how each tree operates on pre-processed data and gets collapsed into the univariate shape function $f_{k}$ .
|
| 52 |
+
|
| 53 |
+
feature in round-robin fashion. To enforce additivity, each tree is only allowed to use one feature, thus preventing interactions from being learned (Lou et al., 2012).
|
| 54 |
+
|
| 55 |
+
Cyclic boosting begins by growing a shallow decision tree on the first feature in the dataset. The predictions the tree makes on each bin of the histogram are then multiplied by a low learning rate, and these weak predictions for each bin of data become the initial shape function for the first feature. The process then iterates to the second feature, where a tree is trained to predict on the residuals (the remaining error) of the first feature's model. Once a shallow tree has been learned for every feature, the boosting process cycles back to the first feature and continues in a round robin fashion for all $E$ epochs to jointly optimize all functions. The pseudocode for this algorithm can be found in Algorithm 1.
Algorithm 1 Explainable Boosting
1: Input: data $X$, labels $y$, epochs $E$, learning rate $\eta$, max splits $m$
2: Output: 1d functions $f_{k}$ per feature
3:
4: $t = 0$
5: Initialize residuals: $r_i^t = y_i$
6: for feature $0, \dots, K$ do
7: Bin data: $H_{k} = \text{Bin}(X[:, k])$
8: Initialize output function: $f_{k}^{t} = [0, \dots, 0]$
9: end for
10:
11: for epoch $1, \dots, E$ do
12: for feature $0, \dots, K$ do
13: $t += 1$
14: Select best splits $S_{0}, \dots, S_{m}$
15: for split $\ell \in \{0, \dots, m\}$ do
16: Sum residuals: $T = \eta \cdot \sum_{b \in S_{\ell}} \sum_{x_i \in H_k(b)} r_i^t$
17: ...
18: Calculate average: $\mu = \frac{T}{\sum_{b \in S_{\ell}} H_k(b)}$
19: for each histogram bin $b \in S_{\ell}$ do
20: Update output function: $f_{k}^{t}(b) = f_{k}^{t}(b) + \mu$
21: ...
22: end for
23: end for
24: for each data point $x_i$ do
25: Residuals: $r_i^{t+1} = y_i - \sum_k f_k^t (\rho(H_k, x_i))$
26: end for
27: end for
28: end for

# 2.2. Differential Privacy
|
| 88 |
+
|
| 89 |
+
Here we state some basic results in Differential Privacy (DP) that we use in our analysis.
|
| 90 |
+
|
| 91 |
+
Definition 1 (Differential Privacy). A randomized algorithm $\mathcal{A}$ is $(\varepsilon, \delta)$ -differentially private if for all neighboring databases $D_{1}, D_{2} \in D^{n}$ , and for all sets $\mathcal{S}$ of possible outputs:
|
| 92 |
+
|
| 93 |
+
$$
\Pr[\mathcal{A}(D_1) \in \mathcal{S}] \leq e^{\varepsilon} \Pr[\mathcal{A}(D_2) \in \mathcal{S}] + \delta \tag{1}
$$

Theorem 1 (Gaussian Mechanism (Dwork et al., 2014)). Given any function $f: D \to \mathbb{R}^k$ , the Gaussian Mechanism is defined as:
|
| 98 |
+
|
| 99 |
+
$$
\mathcal{M}(x, f(\cdot), \varepsilon, \delta) = f(x) + (Y_1, \dots, Y_k) \tag{2}
$$

where $\Delta_{2}$ is the $\ell_2$-sensitivity of $f$ and the $Y_{i}$ are i.i.d. random variables drawn from $\mathcal{N}(0,\sigma^2)$. The Gaussian Mechanism is $(\varepsilon, \delta)$-differentially private when $\sigma > \sqrt{2\ln(1.25/\delta)}\,\Delta_{2}/\varepsilon$ and $\varepsilon \in (0,1)$.
Algorithm 2 Differentially Private Explainable Boosting
1: Input: data $X$, labels $y$, epochs $E$, learning rate $\eta$, max splits $m$, range of labels $R$, privacy parameters $\varepsilon$, $\delta$
2: Output: 1d functions $f_{k}$ per feature
3:
4: $t = 0$
5: Initialize residuals: $r_i^t = y_i$
6: for feature $0, \dots, K$ do
7: Privately bin data: $\hat{H}_k = \text{DPBin}(X[:, k], \varepsilon_{bin})$
8: Initialize output function: $f_k^t = [0, \dots, 0]$
9: end for
10:
11: for epoch $1, \dots, E$ do
12: for feature $0, \dots, K$ do
13: $t += 1$
14: Randomly select splits $S_0, \dots, S_m$
15: for split $\ell \in \{0, \dots, m\}$ do
16: Sum residuals: $T = \eta \cdot \sum_{b \in S_\ell} \sum_{x_i \in \hat{H}_k(b)} r_i^t$
17: Add noise: $\hat{T} = T + \sigma \cdot \eta R \cdot \mathcal{N}(0, 1)$
18: Calculate private average: $\mu = \frac{\hat{T}}{\sum_{b \in S_\ell} \hat{H}_k(b)}$
19: for each histogram bin $b \in S_\ell$ do
20: Update output function: $f_k^t(b) = f_k^t(b) + \mu$
21: We release $f_k^t(b)$ values publicly.
22: end for
23: end for
24: for each data point $x_i$ do
25: Residuals: $r_i^{t+1} = y_i - \sum_k f_k^t (\rho(\hat{H}_k, x_i))$
26: end for
27: end for
28: end for

One of the main strengths of DP are the composition theorems, which analyze the cumulative privacy guarantee when applying many differentially private mechanisms.
|
| 136 |
+
|
| 137 |
+
# Theorem 2. (Theorem 4.3 from (Kairouz et al., 2017))
|
| 138 |
+
|
| 139 |
+
For $\Delta > 0$ , $\varepsilon > 0$ and $\delta \in [0,1]$ , the mechanism that adds Gaussian noise with variance:
|
| 140 |
+
|
| 141 |
+
$$
8 k \Delta^{2} \log(e + (\varepsilon / \delta)) / \varepsilon^{2} \tag{3}
$$

satisfies $(\varepsilon, \delta)$ -differential privacy under $k$ -fold adaptive composition.
|
| 146 |
+
|
| 147 |
+
A qualitative way to understand the above theorem is that if there are $k$ differentially private mechanisms each of which is $(\varepsilon, \delta)$ -DP acting on the same data set, then the overall privacy loss is roughly $\varepsilon \cdot \sqrt{k}$ .
|
| 148 |
+
|
| 149 |
+
Unfortunately, Theorem 2 is not the optimal bound one can achieve on the composition of private mechanisms. A tighter analysis of composition for Gaussian mechanisms, called Gaussian Differential Privacy (GDP), was recently proposed by (Dong et al., 2019). In our experiments, GDP analysis gave us better privacy bounds. We summarize
|
| 150 |
+
|
| 151 |
+
the main theorems we borrow from (Dong et al., 2019) below. For completeness, we compare the results from both composition methods ("EBM-Classic" and "EBM-GDP") in Tables 2 and 3.
|
| 152 |
+
|
| 153 |
+
Theorem 3. For a dataset $D$ , define the Gaussian mechanism that operates on a univariate statistic $\theta$ with sensitivity $\Delta$ as $M(D) = \theta(D) + \text{Noise}$ , where $\text{Noise}$ is sampled from a Gaussian distribution $\mathcal{N}(0, \Delta^2 / \mu^2)$ . Then, $M$ is $\mu$ -GDP.
|
| 154 |
+
|
| 155 |
+
If $M_1, M_2, \ldots, M_k$ are $k$ GDP mechanisms with parameters $\mu_1, \mu_2, \ldots, \mu_k$ , then the following GDP composition theorem holds:
|
| 156 |
+
|
| 157 |
+
Theorem 4. The $k$ -fold composition of $\mu_{i}$ -GDP mechanisms is $\sqrt{\mu_1^2 + \mu_2^2 + \ldots \mu_k^2}$ -GDP.
|
| 158 |
+
|
| 159 |
+
Finally, one can convert GDP guarantees to the standard $(\epsilon ,\delta)$ -DP guarantee using the following theorem:
|
| 160 |
+
|
| 161 |
+
Theorem 5. A mechanism is $\mu$ -GDP if and only if it is $(\varepsilon, \delta)$ -DP where
|
| 162 |
+
|
| 163 |
+
$$
\delta = \Phi\left(-\frac{\varepsilon}{\mu} + \frac{\mu}{2}\right) - e^{\varepsilon}\,\Phi\left(-\frac{\varepsilon}{\mu} - \frac{\mu}{2}\right)
$$

Besides the mathematical elegance, a key advantage of GDP is that it provides a tighter analysis of composition guarantees of differentially private mechanisms.
|
| 168 |
+
|
| 169 |
+
# 2.3. Notation
|
| 170 |
+
|
| 171 |
+
For the rest of the paper, we adopt the following notation. We denote by $H_{k}$ the histogram for feature $k$ and by $\hat{H}_k$ the corresponding differentially private histogram. We use $K$ to denote the total number of features. By a slight abuse of notation, we write $x_{i}\in H_{k}(b)$ to mean that the data point $x_{i}$ belongs to the histogram bin $b$, and use $\rho(H_k, x_i)$ to look up the bin $b$ such that $x_{i}\in H_{k}(b)$.
|
| 172 |
+
|
| 173 |
+
# 3. Algorithms
|
| 174 |
+
|
| 175 |
+
To add differential privacy guarantees to EBMs, we modify the EBM training procedure Algorithm 1, yielding DP-EBM Algorithm 2. We first modify the pre-processing procedure to generate differentially private bins (published as histograms $\hat{H}_k$ per feature), which log the bin ranges and how many data points fall in each bin (line 7). Next, we analyze the boosting process. In traditional tree building, there are two major data-intensive operations: learning the structure of the tree (what feature and feature threshold to install at each node in the tree), and calculating the predicted value of each leaf node (Breiman et al., 1984). Prior work on differentially private tree learning typically splits budget between choosing which features to split on, where to split
|
| 176 |
+
|
| 177 |
+
them, and learning prediction values for each leaf node (Fletcher & Islam, 2015; Wang et al.; Li et al., 2020).
|
| 178 |
+
|
| 179 |
+
EBMs naturally avoid spending any privacy budget on choosing which features to include in each tree – the "round-robin" schedule of visiting features is completely data agnostic. Furthermore, by choosing the splitting thresholds at random, we can learn the entire structure of each tree without looking at any training data (line 14). Prior work and our empirical evaluations both show that choosing random splits results in little accuracy loss (Geurts et al., 2006; Fletcher & Islam, 2019). We therefore spend the entirety of our budget per iteration on learning the values for each leaf node, which are simply averages of the residuals for the data belonging to each node (lines 16-18). For each leaf, we sum the residuals of data belonging to that leaf, add calibrated Gaussian noise based on their bounded sensitivity $R$ , and divide by a differentially private count of data in each leaf (contained in the previously published $\hat{H}_k$ ). Then, as in non-private EBMs, the noisy tree is merged into the feature function $f_k$ (line 20), and the cyclic boosting procedure moves onto the next feature and continues for all $E$ epochs. The pseudocode for the DP-EBM algorithm is described in Algorithm 2, with modifications to the non-private version highlighted in blue.
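To make lines 16-18 of Algorithm 2 concrete, here is a toy sketch of the noisy leaf-value computation, assuming labels (and hence residuals) bounded in a range of width $R$; the calibration of $\sigma$ is given in the analysis below, and the numbers here are purely illustrative.

```python
import numpy as np

rng = np.random.default_rng(0)

def private_leaf_value(residuals, private_count, eta, R, sigma):
    """Noisy average of residuals for one leaf (one split S_l of Algorithm 2)."""
    T = eta * np.sum(residuals)                 # sensitive sum; sensitivity is eta * R
    T_hat = T + sigma * eta * R * rng.normal()  # Gaussian mechanism (line 17)
    return T_hat / max(private_count, 1)        # already-public DP count (line 18)

# Toy call: 500 residuals in [-1, 1], DP count 503, learning rate 0.01, label range 2.
print(private_leaf_value(rng.uniform(-1, 1, 500), 503, eta=0.01, R=2.0, sigma=1.3))
```
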
We now provide the privacy analysis of our algorithm using the GDP framework. Our proof of privacy has the following two components. First we fix a single iteration of the algorithm (lines 13-26), and show that each iteration is $\frac{1}{\sigma} - GDP$ . At the end of each iteration, we publicly release the functions $f_k^t$ for all $k$ . Note that although the final model only uses the $f_k^t$ values of the last iteration, releasing every $f_k^t$ leads to a simpler privacy analysis. Next, we calculate the total privacy loss of our algorithm by simply viewing it as a composition of $E \cdot K$ private mechanisms. It is important to note that composition theorems work even when the mechanisms depend on each other.
|
| 182 |
+
|
| 183 |
+
Theorem 6. Each iteration of our algorithm is $\frac{1}{\sigma} - GDP$ .
|
| 184 |
+
|
| 185 |
+
Proof. We observe that calculating $T$ in the line 16 is the only step of our algorithm where we access the sensitive information of the users. Thus to prove the theorem we need to argue that the noise we are adding satisfies requirements of Theorem 3. Consider
|
| 186 |
+
|
| 187 |
+
$$
\begin{aligned}
T &= \eta \cdot \sum_{b \in S_{\ell}} \sum_{x_i \in \hat{H}_k(b)} r_i^t \\
  &= \eta \cdot \sum_{b \in S_{\ell}} \sum_{x_i \in \hat{H}_k(b)} \Big(y_i - \sum_{k} f_k^{t-1}(\rho(\hat{H}_k, x_i))\Big) \\
  &= \eta \cdot \Big(\sum_{b \in S_{\ell}} \sum_{x_i \in \hat{H}_k(b)} y_i\Big) - Z
\end{aligned}
$$

The second equality follows from the definition of $r_i^t$ as given in the line 25 of the algorithm. Further, $Z$ is computed using publicly released $f_k^{t-1}$ values from the iteration $t-1$ , and hence does not depend on the user data. Therefore, the amount of noise we need to add to the statistic $T$ depends on the sensitivity of quantity $\left( \sum_{b \in S_\ell} \sum_{x_i \in \hat{H}_k(b)} y_i \right)$ , which we argue is bounded by at most $R$ . This follows from three simple facts: 1) The range of each $y_i$ is bounded by at most $R$ ; 2) For each feature, each user contributes exactly to one bin of the histogram; 3) Random splits performed in line 14 of our algorithm partition the histogram bins into disjoint splits, hence each users' data belongs to precisely one split. The proof now follows from Theorem 3.
|
| 192 |
+
|
| 193 |
+
Theorem 7. Our algorithm is $\frac{\sqrt{E\cdot K}}{\sigma} - GDP$
|
| 194 |
+
|
| 195 |
+
Proof. As each iteration of our algorithm is $\frac{1}{\sigma} - GDP$ , the proof follows from the composition of GDP-mechanisms as given in Theorem 4 over all $E \cdot K$ iterations.
|
| 196 |
+
|
| 197 |
+
The GDP bounds can be converted into $(\epsilon, \delta)$ -DP guarantees using Theorem 5. To calibrate $\sigma$ in line 17, we fix the $\varepsilon$ and $\delta$ privacy parameters we want to achieve, use Theorem 5 to calculate $\mu$ , and finally calculate $\sigma$ by setting $\mu = \frac{\sqrt{E \cdot K}}{\sigma}$ .
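Numerically, this calibration can be done by inverting Theorem 5 with a root finder. The sketch below is our own illustration with SciPy, not the released implementation, and it ignores the separate slice of budget reserved for private binning.

```python
import numpy as np
from scipy.stats import norm
from scipy.optimize import brentq

def delta_from_mu(mu, eps):
    # Theorem 5: the delta achieved by a mu-GDP mechanism at privacy level eps.
    return norm.cdf(-eps / mu + mu / 2) - np.exp(eps) * norm.cdf(-eps / mu - mu / 2)

def calibrate_sigma(eps, delta, epochs, n_features):
    # Find mu such that the composed mechanism is (eps, delta)-DP, then solve
    # mu = sqrt(E * K) / sigma for the per-iteration noise multiplier.
    mu = brentq(lambda m: delta_from_mu(m, eps) - delta, 1e-6, 50.0)
    return np.sqrt(epochs * n_features) / mu

print(calibrate_sigma(eps=1.0, delta=1e-6, epochs=300, n_features=14))
```
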
# 4. Experiments
|
| 200 |
+
|
| 201 |
+
We compare the following algorithms on four classification and four regression datasets:
|
| 202 |
+
|
| 203 |
+
- DP-EBM (Algorithm 2): We use the following (default) parameters for all experiments: max_bins = 32, learning_rate = 0.01, n_epochs = 300, max_leaves = 3, with $10\%$ of the total privacy budget allocated to binning and $90\%$ to training. We present two results for DP-EBMs: "EBM-Classic", where we apply strong composition from (Kairouz et al., 2017), and "EBM-GDP", where composition is more tightly tracked via Gaussian Differential Privacy (Dong et al., 2019).
|
| 204 |
+
- Generalized Linear Models: Linear and Logistic Regression are widely used methods for interpretable machine learning. For both models, we use IBM's differential privacy library (Holohan, 2019) which follows the algorithms described in (Sheffet, 2015; Imtiaz & Sarwate, 2016) for linear regression and in (Chaudhuri et al., 2011) for logistic regression.
|
| 205 |
+
- DP Boost: DPBoost is a differentially private gradient boosted decision tree algorithm introduced by (Li et al., 2020). DPBoost builds on top of LightGBM, a popular tree-based boosting framework (Ke et al., 2017).
|
| 206 |
+
|
| 207 |
+
To evaluate performance, we generate 25 randomly drawn 80/20 train-test splits and report the average test-set accuracy and standard deviation at varying $\varepsilon$ and fixed $\delta = 10^{-6}$ . Results are presented in Tables 2 and 3 using root mean squared error (RMSE) as the metric for regression and area under the ROC curve (AUROC) for classification.
|
| 208 |
+
|
| 209 |
+
All models were trained using default or recommended parameters from the literature or open source repositories. Hyperparameter tuning is a privacy-sensitive operation, and how to best partition budget and tune parameters of differentially private models is an open research problem (Liu & Talwar, 2019; Kusner et al., 2015). We did not tune hyperparameters to avoid this complexity. The default parameters for DP-EBMs appear to work well on a variety of datasets, which helps conserve the privacy budget and also makes DP-EBMs easy to use in practice.
|
| 210 |
+
|
| 211 |
+
The datasets used in these experiments (with the exception of the healthcare data, which contains real patient data) are publicly available and summarized in Table 1. We include results from the private healthcare dataset to highlight how these models might perform in a high stakes setting where both privacy and interpretability are critical.
|
| 212 |
+
|
| 213 |
+
Table 1. Experiment dataset statistics and descriptions.
|
| 214 |
+
|
| 215 |
+
<table><tr><td>Dataset</td><td>Domain</td><td>N</td><td>K</td><td>Task</td><td>% Pos</td></tr><tr><td>Adult Income</td><td>Finance</td><td>32,561</td><td>14</td><td>Clas</td><td>24.1%</td></tr><tr><td>Telco Churn</td><td>Business</td><td>7,043</td><td>19</td><td>Clas</td><td>26.6%</td></tr><tr><td>Credit</td><td>Finance</td><td>284,807</td><td>30</td><td>Clas</td><td>0.2%</td></tr><tr><td>Healthcare</td><td>Medicine</td><td>14,199</td><td>46</td><td>Clas</td><td>10.9%</td></tr><tr><td>Cal-Housing</td><td>Real Estate</td><td>20,499</td><td>8</td><td>Reg</td><td>-</td></tr><tr><td>Elevators</td><td>Systems</td><td>16,598</td><td>18</td><td>Reg</td><td>-</td></tr><tr><td>Pol</td><td>Business</td><td>15,000</td><td>49</td><td>Reg</td><td>-</td></tr><tr><td>Wine-Quality</td><td>Food</td><td>5,297</td><td>11</td><td>Reg</td><td>-</td></tr></table>
|
| 216 |
+
|
| 217 |
+
# 4.1. Discussion of Experimental Results
|
| 218 |
+
|
| 219 |
+
As shown in Figure 1 and again in more detail in Tables 2 and 3, DP-EBMs exhibit strong performance across a wide range of $\varepsilon$ values. In classification, the difference in AUROC between non-private DP-EBMs and even $\varepsilon = 0.5$ DP-EBMs is at most 0.05, which is a comparatively modest tradeoff for strong privacy guarantees. At a more modest $\varepsilon = 4$ , the average AUROC for DPBoost, Logistic Regression and DP-EBMs is 0.62, 0.56 and 0.88, respectively. The datasets chosen are not particularly favorable for EBMs – in the non-private setting, LightGBM outperforms EBMs in over half of our experiments. However, when training with differential privacy, DP-EBMs outperform other models in all 40 experimental settings. In the following sections, we offer some thoughts on why the DP-EBM algorithm might be comparatively well suited for differential privacy.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
Figure 3. Risk of dying as a function of age from three EBMs trained on the healthcare dataset with varying privacy guarantees.
|
| 223 |
+
|
| 224 |
+
# 4.1.1. DP-EBMS VS LINEAR MODELS
|
| 225 |
+
|
| 226 |
+
In the case of DP linear and logistic regression – the current standard for intelligible and private learning – we believe the additional accuracy of DP-EBMs might be explained by the extra flexibility of the GAM model class. In the non-private setting, the non-linear functions learned by GAMs often result in a boost in accuracy over the linear functions learned by linear models, and this advantage appears to translate to the private setting as well. In addition, the iterative nature of gradient boosting might give DP-EBMs the ability to recover from the impact of noise earlier in training.
|
| 227 |
+
|
| 228 |
+
# 4.1.2.DP-EBMs vs DPBOOST
|
| 229 |
+
|
| 230 |
+
Although it may not be surprising that DP-EBMs outperform restricted models such as DP linear regression, it is a little surprising that DP-EBMs outperform DPBoost which is a less restricted model class than DP-EBMs. We believe this might be due to the significant privacy budget savings when learning each tree. Unlike other DP tree-based learning algorithms, DP-EBMs spend no budget learning the structure of each tree, and focus exclusively on learning the best leaf node values. In addition, by growing shallow trees, each leaf often contains a large proportion of the dataset – with default parameters of 2 random splits, in expectation each leaf contains $\approx \frac{1}{3}$ of the full data. This ensures that the impact of noise in the differentially private average calculated per iteration is dispersed across many training datapoints. In contrast, each tree in LightGBM/DPBoost contains up to 31 leaf nodes, thereby operating on much smaller amounts of data and magnifying the impact of noise.
|
| 231 |
+
|
| 232 |
+
# 4.1.3. DP-EBM: CLASSIC COMPOSITION VS GDP
|
| 233 |
+
|
| 234 |
+
We also compare two variants of privacy budget tracking in DP-EBMs: "EBM-Classic", which uses strong composition from (Kairouz et al., 2017), and "EBM-GDP" which uses
|
| 235 |
+
|
| 236 |
+
Gaussian differential privacy recently proposed by (Dong et al., 2019). While we can show analytically that budget tracking with GDP is tighter for our algorithm (and therefore requires less noise for the same privacy loss), it is interesting that the differences in final model accuracy are typically only noticeable in strong privacy settings ( $\varepsilon \leq 1$ ).
|
| 237 |
+
|
| 238 |
+
# 5. Discussion
|
| 239 |
+
|
| 240 |
+
# 5.1. Editing DP-EBM Models
|
| 241 |
+
|
| 242 |
+
While this paper has primarily focused on introducing and comparing DP-EBMs in terms of standard machine learning metrics, we believe it is important to highlight the unique capabilities that arise when combining interpretability with differential privacy. For example, recent work has shown that adding differentially private noise to machine learning algorithms can disproportionately impact minority groups (Cummings et al., 2019; Bagdasaryan & Shmatikov, 2019). This concern is compounded when models are trained and deployed in high-risk domains like finance and healthcare - even small errors on sparse regions of the feature space can have disastrous consequences.
|
| 243 |
+
|
| 244 |
+
With intelligible models like DP-EBMs, some effects of noise on predictions are visible. Figure 3 shows the shape function DP-EBMs learned for the "Age" feature in the healthcare dataset at 3 different levels of privacy. In many healthcare problems, risk should monotonically increase as a function of age. While the non-private and $\varepsilon = 4$ models learn this behavior, there is significant distortion as a result of differentially private noise at $\varepsilon = 1$ . In this example, the $\varepsilon = 1$ model suggests that patients who are 80 are considerably lower risk than those who are 77 or 82, which is almost certainly not a real signal in the dataset. By using an intelligible model, domain experts can inspect the shape function $f_{age}$ and prevent deploying a risky model that under predicts on 80 year olds.
|
| 245 |
+
|
| 246 |
+
Table 2. Area Under the ROC Curve (AUROC) algorithm comparison on classification datasets. Higher is better.
|
| 247 |
+
|
| 248 |
+
<table><tr><td>DATASET</td><td>ε</td><td>DPBOOST</td><td>LOGISTIC REGRESSION</td><td>DPEBM-CLASSIC</td><td>DPEBM-GDP</td></tr><tr><td rowspan="6">ADULT-INCOME</td><td>0.5</td><td>0.558 ± 0.045</td><td>0.488 ± 0.125</td><td>0.873 ± 0.007</td><td>0.875 ± 0.005</td></tr><tr><td>1.0</td><td>0.566 ± 0.034</td><td>0.471 ± 0.111</td><td>0.880 ± 0.006</td><td>0.883 ± 0.005</td></tr><tr><td>2.0</td><td>0.629 ± 0.045</td><td>0.521 ± 0.109</td><td>0.886 ± 0.005</td><td>0.887 ± 0.004</td></tr><tr><td>4.0</td><td>0.734 ± 0.019</td><td>0.549 ± 0.068</td><td>0.889 ± 0.004</td><td>0.889 ± 0.004</td></tr><tr><td>8.0</td><td>0.805 ± 0.011</td><td>0.534 ± 0.070</td><td>0.890 ± 0.004</td><td>0.890 ± 0.004</td></tr><tr><td>Non-Private</td><td>0.928 ± 0.003</td><td>0.603 ± 0.066</td><td>0.923 ± 0.003</td><td>0.923 ± 0.003</td></tr><tr><td rowspan="6">CREDIT-FRAUD</td><td>0.5</td><td>0.442 ± 0.138</td><td>0.558 ± 0.076</td><td>0.959 ± 0.015</td><td>0.966 ± 0.012</td></tr><tr><td>1.0</td><td>0.438 ± 0.114</td><td>0.544 ± 0.135</td><td>0.965 ± 0.014</td><td>0.966 ± 0.013</td></tr><tr><td>2.0</td><td>0.467 ± 0.101</td><td>0.526 ± 0.118</td><td>0.969 ± 0.011</td><td>0.969 ± 0.011</td></tr><tr><td>4.0</td><td>0.465 ± 0.142</td><td>0.539 ± 0.172</td><td>0.969 ± 0.011</td><td>0.969 ± 0.011</td></tr><tr><td>8.0</td><td>0.556 ± 0.145</td><td>0.546 ± 0.156</td><td>0.969 ± 0.011</td><td>0.969 ± 0.011</td></tr><tr><td>Non-Private</td><td>0.726 ± 0.099</td><td>0.922 ± 0.019</td><td>0.965 ± 0.011</td><td>0.965 ± 0.011</td></tr><tr><td rowspan="6">HEALTHCARE</td><td>0.5</td><td>0.515 ± 0.054</td><td>0.463 ± 0.081</td><td>0.714 ± 0.036</td><td>0.793 ± 0.018</td></tr><tr><td>1.0</td><td>0.505 ± 0.051</td><td>0.479 ± 0.071</td><td>0.789 ± 0.016</td><td>0.818 ± 0.012</td></tr><tr><td>2.0</td><td>0.499 ± 0.046</td><td>0.495 ± 0.081</td><td>0.822 ± 0.012</td><td>0.830 ± 0.011</td></tr><tr><td>4.0</td><td>0.567 ± 0.047</td><td>0.542 ± 0.038</td><td>0.834 ± 0.011</td><td>0.835 ± 0.010</td></tr><tr><td>8.0</td><td>0.638 ± 0.036</td><td>0.529 ± 0.048</td><td>0.836 ± 0.010</td><td>0.837 ± 0.010</td></tr><tr><td>Non-Private</td><td>0.836 ± 0.011</td><td>0.744 ± 0.014</td><td>0.847 ± 0.010</td><td>0.847 ± 0.010</td></tr><tr><td rowspan="6">TELCO-CHURN</td><td>0.5</td><td>0.484 ± 0.100</td><td>0.541 ± 0.227</td><td>0.812 ± 0.020</td><td>0.829 ± 0.014</td></tr><tr><td>1.0</td><td>0.458 ± 0.088</td><td>0.479 ± 0.239</td><td>0.832 ± 0.013</td><td>0.835 ± 0.011</td></tr><tr><td>2.0</td><td>0.534 ± 0.109</td><td>0.527 ± 0.236</td><td>0.837 ± 0.010</td><td>0.838 ± 0.012</td></tr><tr><td>4.0</td><td>0.716 ± 0.067</td><td>0.615 ± 0.138</td><td>0.838 ± 0.011</td><td>0.839 ± 0.011</td></tr><tr><td>8.0</td><td>0.787 ± 0.014</td><td>0.673 ± 0.105</td><td>0.839 ± 0.011</td><td>0.839 ± 0.011</td></tr><tr><td>Non-Private</td><td>0.836 ± 0.008</td><td>0.844 ± 0.010</td><td>0.848 ± 0.009</td><td>0.848 ± 0.009</td></tr></table>
|
| 249 |
+
|
| 250 |
+
Table 3. Root Mean Squared Error (RMSE) algorithm comparison on regression datasets. Lower is better.
|
| 251 |
+
|
| 252 |
+
<table><tr><td>DATASET</td><td>ε</td><td>DPBOOST</td><td>LINEAR REGRESSION</td><td>DPEBM-CLASSIC</td><td>DPEBM-GDP</td></tr><tr><td rowspan="6">CAL-HOUSING</td><td>0.5</td><td>383072 ± 41952</td><td>111967 ± 1080</td><td>85652 ± 2724</td><td>79967 ± 1929</td></tr><tr><td>1.0</td><td>204277 ± 19350</td><td>110241 ± 1101</td><td>78527 ± 1230</td><td>76827 ± 1470</td></tr><tr><td>2.0</td><td>122494 ± 7066</td><td>109518 ± 1244</td><td>75491 ± 1404</td><td>74573 ± 1152</td></tr><tr><td>4.0</td><td>96336 ± 3043</td><td>108882 ± 1370</td><td>73967 ± 1028</td><td>73754 ± 1022</td></tr><tr><td>8.0</td><td>90029 ± 2508</td><td>107815 ± 1460</td><td>73327 ± 1118</td><td>73165 ± 955</td></tr><tr><td>Non-Private</td><td>47007 ± 885</td><td>69850 ± 1164</td><td>51644 ± 925</td><td>51644 ± 925</td></tr><tr><td rowspan="6">ELEVATORS</td><td>0.5</td><td>0.051 ± 0.005</td><td>4.671 ± 1.975</td><td>0.010 ± 0.001</td><td>0.006 ± 0.000</td></tr><tr><td>1.0</td><td>0.025 ± 0.002</td><td>2.669 ± 1.214</td><td>0.007 ± 0.000</td><td>0.005 ± 0.000</td></tr><tr><td>2.0</td><td>0.013 ± 0.001</td><td>1.384 ± 0.570</td><td>0.006 ± 0.000</td><td>0.005 ± 0.000</td></tr><tr><td>4.0</td><td>0.008 ± 0.000</td><td>0.754 ± 0.202</td><td>0.005 ± 0.000</td><td>0.004 ± 0.000</td></tr><tr><td>8.0</td><td>0.006 ± 0.000</td><td>0.410 ± 0.201</td><td>0.004 ± 0.000</td><td>0.004 ± 0.000</td></tr><tr><td>Non-Private</td><td>0.002 ± 0.000</td><td>0.003 ± 0.000</td><td>0.004 ± 0.000</td><td>0.004 ± 0.000</td></tr><tr><td rowspan="6">POL</td><td>0.5</td><td>78.190 ± 9.583</td><td>31.326 ± 0.418</td><td>35.156 ± 1.728</td><td>30.988 ± 0.962</td></tr><tr><td>1.0</td><td>50.527 ± 5.482</td><td>30.640 ± 0.288</td><td>30.911 ± 1.014</td><td>28.391 ± 0.585</td></tr><tr><td>2.0</td><td>47.511 ± 4.636</td><td>30.500 ± 0.248</td><td>27.616 ± 0.644</td><td>26.303 ± 0.561</td></tr><tr><td>4.0</td><td>45.592 ± 2.942</td><td>30.463 ± 0.256</td><td>25.454 ± 0.389</td><td>24.934 ± 0.332</td></tr><tr><td>8.0</td><td>45.435 ± 1.109</td><td>30.459 ± 0.258</td><td>24.625 ± 0.230</td><td>24.313 ± 0.237</td></tr><tr><td>Non-Private</td><td>4.703 ± 0.228</td><td>30.464 ± 0.264</td><td>13.780 ± 0.667</td><td>13.780 ± 0.667</td></tr><tr><td rowspan="6">WINE-QALITY</td><td>0.5</td><td>4.647 ± 0.390</td><td>3.621 ± 1.740</td><td>1.589 ± 0.132</td><td>0.938 ± 0.036</td></tr><tr><td>1.0</td><td>2.151 ± 0.302</td><td>2.133 ± 0.713</td><td>1.181 ± 0.074</td><td>0.841 ± 0.025</td></tr><tr><td>2.0</td><td>1.299 ± 0.092</td><td>1.263 ± 0.322</td><td>0.935 ± 0.042</td><td>0.779 ± 0.018</td></tr><tr><td>4.0</td><td>0.946 ± 0.043</td><td>0.940 ± 0.100</td><td>0.807 ± 0.019</td><td>0.746 ± 0.011</td></tr><tr><td>8.0</td><td>0.847 ± 0.021</td><td>0.839 ± 0.035</td><td>0.751 ± 0.013</td><td>0.733 ± 0.014</td></tr><tr><td>Non-Private</td><td>0.622 ± 0.013</td><td>0.759 ± 0.015</td><td>0.681 ± 0.012</td><td>0.681 ± 0.012</td></tr></table>
|
| 253 |
+
|
| 254 |
+

|
| 255 |
+
Figure 4. (Left) Original learned $f_{age}$ from the healthcare dataset with $\varepsilon = 1$ . (Middle, Right) Postprocessed to enforce monotonicity.
|
| 256 |
+
|
| 257 |
+

|
| 258 |
+
|
| 259 |
+

|
| 260 |
+
|
| 261 |
+
In addition to catching errors, domain experts can also correct unwanted learned effects without impacting privacy. Because the shape function $f_{age}$ exactly describes how a model makes predictions, users can edit graphs of any feature to change the model. In our example, modifying y-axis value for $f_{age}$ at ages 78-81 to remove the unwanted blip would remove this noise bias from the model. This form of model editing uses no data, and therefore results in no additional privacy loss under the post-processing property of differential privacy (Dwork et al., 2006). The ability to safely inspect and edit DP-EBM models before deployment is important for creating trust in differentially private models in high risk situations because some of the impacts of noise can be corrected.
|
| 262 |
+
|
| 263 |
+
# 5.2. Constraints such as monotonicity
|
| 264 |
+
|
| 265 |
+
More complex forms of editing are also possible. For example, we can ensure monotonicity across the entire feature by borrowing from the calibration literature and applying isotonic regression on the shape function (Chakravarti, 1989). Isotonic regression uses the Pool Adjacent Violators (PAV) algorithm to minimize the edits necessary to ensure monotonicity, and is optimal with respect to squared error on the differences between the two functions (vanEeden, 1958). Importantly, this process only uses public information from DP-EBMs – the learned shape functions $f_{k}$ , and the histogram definition $\hat{H}_{k}$ .
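A sketch of this post-processing with scikit-learn's `IsotonicRegression` is shown below (the shape-function values are invented for illustration); only the released shape function and the private histogram counts are used, so no extra privacy budget is spent.

```python
import numpy as np
from sklearn.isotonic import IsotonicRegression

# Noisy per-bin scores of a learned shape function f_age (illustrative values),
# weighted by the (already private) number of training points per bin.
bin_centers = np.arange(18, 91)
noisy_scores = 0.03 * (bin_centers - 18) + np.random.normal(0, 0.05, bin_centers.size)
bin_counts = np.random.randint(50, 500, bin_centers.size)

# Pool Adjacent Violators via sklearn: the closest monotone function to the
# noisy shape function in (weighted) squared error.
iso = IsotonicRegression(increasing=True)
monotone_scores = iso.fit_transform(bin_centers, noisy_scores, sample_weight=bin_counts)
print(np.all(np.diff(monotone_scores) >= 0))  # True: the edited function is monotone
```
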
Figure 4 shows the effects of applying isotonic regression to the noisy $f_{age}$ . While enforcing monotonicity is possible with models more expressive than DP-EBMs, this typically requires additional constraints during training and may consume more privacy budget and complicate the privacy analysis. It is a nice advantage of DP-EBMs that monotonicity can be achieved cleanly via post-processing.
|
| 268 |
+
|
| 269 |
+
# 5.3. Differential Privacy as a Regularizer
|
| 270 |
+
|
| 271 |
+
Figures 3 and 5 also show that adding modest amounts of differentially private noise, like $\varepsilon = 4$, can act as a regularizer for the model. The rise in risk between ages 50 and 90 is smoother, whereas the non-private version learns a jumpier function. Smoothness is traditionally a difficult property to achieve with tree-based boosted GAMs: the non-private EBM algorithm typically wraps the training process in multiple rounds of bagging to make graphs smoother (Lou et al., 2012; Caruana et al., 2015). Our experiments suggest that modest amounts of differentially private noise might act as an effective regularization tool.
|
| 272 |
+
|
| 273 |
+
The relationship between smoothness and interpretability is complex: smooth graphs may be easier to interpret, but over-regularization can hide real signals in the data. The use of differential privacy as a regularizer is well known (Chaudhuri et al., 2011). Our paper visibly reinforces this notion through intelligibility, and we believe studying this effect further on GAMs might be interesting future research.
# 6. Conclusion
We present DP-EBMs, a differentially private learning algorithm for GAMs which achieves remarkably high accuracy and interpretability with strong privacy guarantees. Our empirical evaluations show that DP-EBMs outperform other differentially private learning algorithms for both classification and regression on tabular datasets. Beyond just accuracy, we also show how interpretability can complement differential privacy by enabling users to uncover undesirable effects of noise, edit unwanted bias out of their models, and enforce desirable constraints like monotonicity with no additional privacy loss. These practical advantages might represent an important step forward for enabling the use of differentially private models in industries like healthcare, finance, and criminal justice.
Figure 5. Shape function comparisons for all numeric features in the Adult Income dataset. We include the standard EBMs wrapped in 25 layers of bagging, EBMs without bagging, and two DP-EBMs at different privacy levels ( $\varepsilon = 4$ and $\varepsilon = 1$ ). As expected, adding Gaussian DP noise acts as a strong regularizer — the graphs on the right are smoother than those on the left. In some cases this regularization is too strong, yet in other cases such as $fnlwgt$ the extra regularization might actually reduce model variance and improve intelligibility.
# References
Bagdasaryan, E. and Shmatikov, V. Differential privacy has disparate impact on model accuracy. arXiv preprint arXiv:1905.12101, 2019.
Breiman, L., Friedman, J., Stone, C. J., and Olshen, R. A. Classification and regression trees. CRC press, 1984.
Bu, Z., Dong, J., Long, Q., and Su, W. J. Deep learning with gaussian differential privacy. Harvard data science review, 2020(23), 2020.
Buitinck, L., Louppe, G., Blondel, M., Pedregosa, F., Mueller, A., Grisel, O., Niculae, V., Prettenhofer, P., Gramfort, A., Grobler, J., Layton, R., VanderPlas, J., Joly, A., Holt, B., and Varoquaux, G. API design for machine learning software: experiences from the scikit-learn project. In ECML PKDD Workshop: Languages for Data Mining and Machine Learning, pp. 108-122, 2013.
Carlini, N., Liu, C., Kos, J., Erlingsson, U., and Song, D. The secret sharer: Measuring unintended neural network memorization & extracting secrets. 2018.
Caruana, R., Lou, Y., Gehrke, J., Koch, P., Sturm, M., and Elhadad, N. Intelligible models for healthcare: Predicting pneumonia risk and hospital 30-day readmission. In Proceedings of the 21st ACM SIGKDD international conference on knowledge discovery and data mining, pp. 1721-1730, 2015.
Chakravarti, N. Isotonic median regression: a linear programming approach. Mathematics of operations research, 14(2):303-308, 1989.
Chang, C.-H., Tan, S., Lengerich, B., Goldenberg, A., and Caruana, R. How interpretable and trustworthy are gams? arXiv preprint arXiv:2006.06466, 2020.
Chaudhuri, K., Monteleoni, C., and Sarwate, A. D. Differentially private empirical risk minimization. Journal of Machine Learning Research, 12(Mar):1069-1109, 2011.
Chen, T. and Guestrin, C. Xgboost: A scalable tree boosting system. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining, pp. 785-794, 2016.
Cummings, R., Gupta, V., Kimpara, D., and Morgenstern, J. On the compatibility of privacy and fairness. In Adjunct Publication of the 27th Conference on User Modeling, Adaptation and Personalization, pp. 309-315, 2019.
Dong, J., Roth, A., and Su, W. J. Gaussian differential privacy. arXiv preprint arXiv:1905.02383, 2019.
Dua, D. and Graff, C. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml.
Dwork, C., McSherry, F., Nissim, K., and Smith, A. Calibrating noise to sensitivity in private data analysis. In Theory of cryptography conference, pp. 265-284. Springer, 2006.
Dwork, C., Roth, A., et al. The algorithmic foundations of differential privacy. Foundations and Trends® in Theoretical Computer Science, 9(3-4):211-407, 2014.
Fletcher, S. and Islam, M. Z. A differentially private decision forest. AusDM, 15:99-108, 2015.
Fletcher, S. and Islam, M. Z. Decision tree classification with differential privacy: A survey. ACM Computing Surveys (CSUR), 52(4):1-33, 2019.
Geurts, P., Ernst, D., and Wehenkel, L. Extremely randomized trees. Machine learning, 63(1):3-42, 2006.
Harris, C. R., Millman, K. J., van der Walt, S. J., Gommers, R., Virtanen, P., Cournapeau, D., Wieser, E., Taylor, J., Berg, S., Smith, N. J., Kern, R., Picus, M., Hoyer, S., van Kerkwijk, M. H., Brett, M., Haldane, A., del Río, J. F., Wiebe, M., Peterson, P., Gérard-Marchant, P., Sheppard, K., Reddy, T., Weckesser, W., Abbasi, H., Gohlke, C., and Oliphant, T. E. Array programming with NumPy. Nature, 585(7825):357-362, September 2020. doi: 10.1038/s41586-020-2649-2. URL https://doi.org/10.1038/s41586-020-2649-2.
Hastie, T. J. and Tibshirani, R. J. Generalized additive models, volume 43. CRC press, 1990.
Holohan, N. Diffprivlib: The IBM differential privacy library. https://github.com/IBM/differential-privacy-library, 2019.
Imtiaz, H. and Sarwate, A. D. Symmetric matrix perturbation for differentially-private principal component analysis. In 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 2339-2343. IEEE, 2016.
Plotly Technologies Inc. Collaborative data science, 2015. URL https://plot.ly.
Kairouz, P., Oh, S., and Viswanath, P. The composition theorem for differential privacy. IEEE Transactions on Information Theory, 63(6):4037-4049, 2017.
Ke, G., Meng, Q., Finley, T., Wang, T., Chen, W., Ma, W., Ye, Q., and Liu, T.-Y. Lightgbm: A highly efficient gradient boosting decision tree. In Advances in neural information processing systems, pp. 3146-3154, 2017.
Kusner, M., Gardner, J., Garnett, R., and Weinberger, K. Differentially private bayesian optimization. In International conference on machine learning, pp. 918-927. PMLR, 2015.
Li, Q., Wu, Z., Wen, Z., and He, B. Privacy-preserving gradient boosting decision trees. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pp. 784-791, 2020.
Liu, J. and Talwar, K. Private selection from private candidates. In Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, pp. 298-309, 2019.
Lou, Y., Caruana, R., and Gehrke, J. Intelligible models for classification and regression. In Proceedings of the 18th ACM SIGKDD international conference on Knowledge discovery and data mining, pp. 150-158, 2012.
Lundberg, S. M. and Lee, S.-I. A unified approach to interpreting model predictions. In Advances in neural information processing systems, pp. 4765-4774, 2017.
Melis, L., Song, C., De Cristofaro, E., and Shmatikov, V. Exploiting unintended feature leakage in collaborative learning. In 2019 IEEE Symposium on Security and Privacy (SP), pp. 691-706. IEEE, 2019.
Nori, H., Jenkins, S., Koch, P., and Caruana, R. Interpretml: A unified framework for machine learning interpretability. arXiv preprint arXiv:1909.09223, 2019.
Pedregosa, F., Varoquaux, G., Gramfort, A., Michel, V., Thirion, B., Grisel, O., Blondel, M., Prettenhofer, P., Weiss, R., Dubourg, V., Vanderplas, J., Passos, A., Cournapeau, D., Brucher, M., Perrot, M., and Duchesnay, E. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830, 2011.
Rudin, C. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206-215, 2019.
Sheffet, O. Private approximations of the 2nd-moment matrix using existing techniques in linear regression. arXiv preprint arXiv:1507.00056, 2015.
van Eeden, C. Testing and estimating ordered parameters of probability distributions. 1958.
Virtanen, P., Gommers, R., Oliphant, T. E., Haberland, M., Reddy, T., Cournapeau, D., Burovski, E., Peterson, P., Weckesser, W., Bright, J., van der Walt, S. J., Brett, M., Wilson, J., Millman, K. J., Mayorov, N., Nelson, A. R. J., Jones, E., Kern, R., Larson, E., Carey, C. J., Polat, I., Feng, Y., Moore, E. W., VanderPlas, J., Laxalde, D.,
Perktold, J., Cimrman, R., Henriksen, I., Quintero, E. A., Harris, C. R., Archibald, A. M., Ribeiro, A. H., Pedregosa, F., van Mulbregt, P., and SciPy 1.0 Contributors. SciPy 1.0: Fundamental Algorithms for Scientific Computing in Python. Nature Methods, 17:261-272, 2020. doi: 10.1038/s41592-019-0686-2.
Wang, C., Han, B., Patel, B., Mohideen, F., and Rudin, C. In pursuit of interpretable, fair and accurate machine learning for criminal recidivism prediction. arXiv preprint arXiv:2005.04176, 2020.
Wang, K. W., Dick, T., and Balcan, M.-F. Scalable and provably accurate algorithms for differentially private distributed decision tree learning.
accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:beac949a808723f7b01959cf6a9a2e0419898625f86aed2557f7ca3e435f5f4f
size 635666
accuracyinterpretabilityanddifferentialprivacyviaexplainableboosting/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f0510e31e53194a0aec19fc436a12ae70977d1077e195cf68b14724b95f41fa7
size 410727
accuracyonthelineonthestrongcorrelationbetweenoutofdistributionandindistributiongeneralization/c0c386ca-49b0-4e49-9828-2123bf346129_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51b4e6c46d75c61ec9c511833af66863434bdc20796b3846d9b0a53cc6f2bbda
size 101724
accuracyonthelineonthestrongcorrelationbetweenoutofdistributionandindistributiongeneralization/c0c386ca-49b0-4e49-9828-2123bf346129_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b0bac6ff5712dba9daf90a709c9dd531d949607164e5a83b2f92b692aecbde9d
size 134133