Add Batch fe2cc391-51d7-4b8e-b65e-cf8cbb23419a
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_content_list.json +3 -0
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_model.json +3 -0
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_origin.pdf +3 -0
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/full.md +625 -0
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/images.zip +3 -0
- abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/layout.json +3 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_content_list.json +3 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_model.json +3 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_origin.pdf +3 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/full.md +328 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/images.zip +3 -0
- ablockminifloatrepresentationfortrainingdeepneuralnetworks/layout.json +3 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_content_list.json +3 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_model.json +3 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_origin.pdf +3 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/full.md +0 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/images.zip +3 -0
- acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/layout.json +3 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_content_list.json +3 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_model.json +3 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_origin.pdf +3 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/full.md +0 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/images.zip +3 -0
- accuratelearningofgraphrepresentationswithgraphmultisetpooling/layout.json +3 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_content_list.json +3 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_model.json +3 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_origin.pdf +3 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/full.md +622 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/images.zip +3 -0
- achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/layout.json +3 -0
- acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_content_list.json +3 -0
- acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_model.json +3 -0
- acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_origin.pdf +3 -0
- acritiqueofselfexpressivedeepsubspaceclustering/full.md +616 -0
- acritiqueofselfexpressivedeepsubspaceclustering/images.zip +3 -0
- acritiqueofselfexpressivedeepsubspaceclustering/layout.json +3 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_content_list.json +3 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_model.json +3 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_origin.pdf +3 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/full.md +269 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/images.zip +3 -0
- actingindelayedenvironmentswithnonstationarymarkovpolicies/layout.json +3 -0
- activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_content_list.json +3 -0
- activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_model.json +3 -0
- activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_origin.pdf +3 -0
- activationleveluncertaintyindeepneuralnetworks/full.md +395 -0
- activationleveluncertaintyindeepneuralnetworks/images.zip +3 -0
- activationleveluncertaintyindeepneuralnetworks/layout.json +3 -0
- activecontrastivelearningofaudiovisualvideorepresentations/5bb5cf6c-3156-4aaf-937c-32d7062f861e_content_list.json +3 -0
- activecontrastivelearningofaudiovisualvideorepresentations/5bb5cf6c-3156-4aaf-937c-32d7062f861e_model.json +3 -0
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8e154259c9fe7400e8ab8099188e7afff55f67f93f9b7616a79128bc2fc1290c
|
| 3 |
+
size 120524
|
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:125abcce6f8a7161c49ed85eaef2d177eb0afb97c6176c52d6162a075f4aa46f
|
| 3 |
+
size 144344
|
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/e376c575-4148-454f-aa7a-c6e146714eb6_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:03014a3d102e285ccd1b153dc8a5d558f81c390769da057f9ae11d6a7f980150
|
| 3 |
+
size 7381923
|
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/full.md
ADDED
|
@@ -0,0 +1,625 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A BETTER ALTERNATIVE TO ERROR FEEDBACK FOR COMMUNICATION-EFFICIENT DISTRIBUTED LEARNING
|
| 2 |
+
|
| 3 |
+
Samuel Horváth and Peter Richtárik
|
| 4 |
+
KAUST
|
| 5 |
+
|
| 6 |
+
Thuwal, Saudi Arabia
|
| 7 |
+
|
| 8 |
+
{samuel.horvath, peter.richtarik}@kaust.edu.sa
|
| 9 |
+
|
| 10 |
+
# ABSTRACT
|
| 11 |
+
|
| 12 |
+
Modern large-scale machine learning applications require stochastic optimization algorithms to be implemented on distributed compute systems. A key bottleneck of such systems is the communication overhead for exchanging information (e.g., stochastic gradients) across the workers. Among the many techniques proposed to remedy this issue, one of the most successful is the framework of compressed communication with error feedback (EF). EF remains the only known technique that can deal with the error induced by contractive compressors which are not unbiased, such as Top- $K$ or PowerSGD. In this paper, we propose a new and theoretically and practically better alternative to EF for dealing with contractive compressors. In particular, we propose a construction which can transform any contractive compressor into an induced unbiased compressor. Following this transformation, existing methods able to work with unbiased compressors can be applied. We show that our approach leads to vast improvements over EF, including reduced memory requirements, better communication complexity guarantees and fewer assumptions. We further extend our results to federated learning with partial participation following an arbitrary distribution over the nodes, and demonstrate the benefits thereof. We perform several numerical experiments which validate our theoretical findings.
|
| 13 |
+
|
| 14 |
+
# 1 INTRODUCTION
|
| 15 |
+
|
| 16 |
+
We consider distributed optimization problems of the form
|
| 17 |
+
|
| 18 |
+
$$
|
| 19 |
+
\min _ {x \in \mathbb {R} ^ {d}} f (x) := \frac {1}{n} \sum_ {i = 1} ^ {n} f _ {i} (x), \tag {1}
|
| 20 |
+
$$
|
| 21 |
+
|
| 22 |
+
where $x \in \mathbb{R}^d$ represents the weights of a statistical model we wish to train, $n$ is the number of nodes, and $f_i \colon \mathbb{R}^d \to \mathbb{R}$ is a smooth differentiable loss function composed of data stored on worker $i$ . In a classical distributed machine learning scenario, $f_i(x) \coloneqq \mathrm{E}_{\zeta \sim \mathcal{D}_i}[f_\zeta(x)]$ is the expected loss of model $x$ with respect to the local data distribution $\mathcal{D}_i$ , and $f_\zeta \colon \mathbb{R}^d \to \mathbb{R}$ is the loss on the single data point $\zeta$ . This definition allows for different distributions $\mathcal{D}_1, \ldots, \mathcal{D}_n$ on each node, which means that the functions $f_1, \ldots, f_n$ can have different minimizers. This framework covers Stochastic Optimization when either $n = 1$ or all $\mathcal{D}_i$ are identical, Empirical Risk Minimization (ERM), when $f_i(x)$ can be expressed as a finite average, i.e., $f_i(x) = \frac{1}{m_i} \sum_{j=1}^{m_i} f_{ij}(x)$ for some $f_{ij} : \mathbb{R}^d \to \mathbb{R}$ , and Federated Learning (FL) (Kairouz et al., 2019) where each node represents a client.
|
| 23 |
+
|
| 24 |
+
Communication Bottleneck. In distributed training, model updates (or gradient vectors) have to be exchanged in each iteration. Due to the size of the communicated messages for commonly considered deep models (Alistarh et al., 2016), this represents a significant bottleneck of the whole optimization procedure. To reduce the amount of data that has to be transmitted, several strategies were proposed.
|
| 25 |
+
|
| 26 |
+
One of the most popular strategies is to incorporate local steps and communicate updates every few iterations only (Stich, 2019a; Lin et al., 2018a; Stich & Karimireddy, 2020; Karimireddy et al.,
|
| 27 |
+
|
| 28 |
+
2019a; Khaled et al., 2020). Unfortunately, despite their practical success, local methods are poorly understood and their theoretical foundations are currently lacking. Almost all existing error guarantees are dominated by a simple baseline, minibatch SGD (Woodworth et al., 2020).
|
| 29 |
+
|
| 30 |
+
In this work, we focus on another popular approach: gradient compression. In this approach, instead of transmitting the full dimensional (gradient) vector $g \in \mathbb{R}^d$ , one transmits a compressed vector $\mathcal{C}(g)$ , where $\mathcal{C}: \mathbb{R}^d \to \mathbb{R}^d$ is a (possibly random) operator chosen such that $\mathcal{C}(g)$ can be represented using fewer bits, for instance by using limited bit representation (quantization) or by enforcing sparsity. A particularly popular class of quantization operators is based on random dithering (Goodall, 1951; Roberts, 1962); see (Alistarh et al., 2016; Wen et al., 2017; Zhang et al., 2017; Horváth et al., 2019a; Ramezani-Kebrya et al., 2019). Much sparser vectors can be obtained by random sparsification techniques that randomly mask the input vectors and only preserve a constant number of coordinates (Wangni et al., 2018; Konečný & Richtárik, 2018; Stich et al., 2018; Mishchenko et al., 2019b; Vogels et al., 2019). There is also a line of work (Horváth et al., 2019a; Basu et al., 2019) in which a combination of sparsification and quantization was proposed to obtain a more aggressive effect. We will not further distinguish between sparsification and quantization approaches, and refer to all of them as compression operators hereafter.
|
| 31 |
+
|
| 32 |
+
Considering both practice and theory, compression operators can be split into two groups: biased and unbiased. For the unbiased compressors, $\mathcal{C}(g)$ is required to be an unbiased estimator of the update $g$ . Once this requirement is lifted, extra tricks are necessary for Distributed Compressed Stochastic Gradient Descent (DCSGD) (Alistarh et al., 2016; 2018; Khirirat et al., 2018) employing such a compressor to work, even if the full gradient is computed by each node. Indeed, the naive approach can lead to exponential divergence (Beznosikov et al., 2020), and Error Feedback (EF) (Seide et al., 2014; Karimireddy et al., 2019b) is the only known mechanism able to remedy the situation.
|
| 33 |
+
|
| 34 |
+
Contributions. Our contributions can be summarized as follows:
|
| 35 |
+
|
| 36 |
+
- Induced Compressor. When used within the stabilizing EF framework, biased compressors (e.g., Top-K) can often achieve superior performance when compared to their unbiased counterparts (e.g., Rand-K). This is often attributed to their low variance. However, despite ample research in this area, EF remains the only known mechanism that allows the use of these powerful biased compressors. Our key contribution is the development of a simple but remarkably effective alternative—and this is the only alternative we know of—which we argue leads to better and more versatile methods both in theory and practice. In particular, we propose a general construction that can transform any biased compressor, such as Top-K, into an unbiased one for which we coin the name induced compressor (Section 3). Instead of using the desired biased compressor within EF, our proposal is to instead use the induced compressor within an appropriately chosen existing method designed for unbiased compressors, such as distributed compressed SGD (DCSGD) (Khirirat et al., 2018), variance reduced DCSGD (DIANA) (Mishchenko et al., 2019a) or accelerated DIANA (ADIANA) (Li et al., 2020). While EF can be seen as a version of DCSGD which can work with biased compressors, neither variance reduced nor accelerated variants of EF were known at the time of writing this paper.
|
| 37 |
+
|
| 38 |
+
- Better Theory for DCSGD. As a secondary contribution, we provide a new and tighter theoretical analysis of DCSGD under weaker assumptions. If $f$ is $\mu$ -quasi convex (not necessarily convex) and local functions $f_{i}$ are $(L,\sigma^2)$ -smooth (weaker version of $L$ -smoothness with strong growth condition), we obtain the rate $\mathcal{O}\left(\delta_n L r^0 \exp \left[-\frac{\mu T}{4\delta_n L}\right] + \frac{(\delta_n - 1) D + \delta \sigma^2 / n}{\mu T}\right)$ , where $\delta_{n} = 1 + \frac{\delta - 1}{n}$ and $\delta \geq 1$ is the parameter which bounds the second moment of the compression operator, and $T$ is the number of iterations. This rate has linearly decreasing dependence on the number of nodes $n$ , which is strictly better than the best-known rate for DCSGD with EF, whose convergence does not improve as the number of nodes increases, which is one of the main disadvantages of using EF. Moreover, EF requires extra assumptions. In addition, while the best-known rates for EF (Karimireddy et al., 2019b; Beznosikov et al., 2020) are expressed in terms of functional values, our theory guarantees convergence in both iterates and functional values. Another practical implication of our findings is the reduction of the memory requirements by half; this is because in DCSGD one does not need to store the error vector.
|
| 39 |
+
|
| 40 |
+
- Partial Participation. We further extend our results to obtain the first convergence guarantee for partial participation with arbitrary distributions over nodes, which plays a key role in Federated Learning (FL).
|
| 41 |
+
|
| 42 |
+
Algorithm 1 DCSGD
|
| 43 |
+
1: Input: $\{\eta^k\}_{k=0}^T > 0, x_0$
|
| 44 |
+
2: for $k = 0, 1, \ldots, T$ do
|
| 45 |
+
3: Parallel: Worker side
|
| 46 |
+
4: for $i = 1, \ldots, n$ do
|
| 47 |
+
5: obtain $g_i^k$
|
| 48 |
+
6: send $\Delta_i^k = \mathcal{C}^k(g_i^k)$ to master
|
| 49 |
+
7: [no need to keep track of errors]
|
| 50 |
+
8: end for
|
| 51 |
+
9: Master side
|
| 52 |
+
10: aggregate $\Delta^k = \frac{1}{n} \sum_{i=1}^{n} \Delta_i^k$
|
| 53 |
+
11: broadcast $\Delta^k$ to each worker
|
| 54 |
+
12: Parallel: Worker side
|
| 55 |
+
13: for $i = 1, \ldots, n$ do
|
| 56 |
+
14: $x^{k+1} = x^k - \eta^k \Delta^k$
|
| 57 |
+
15: end for
|
| 58 |
+
16: end for
|
| 59 |
+
|
| 60 |
+
Algorithm 2 DCSGD with Error Feedback
|
| 61 |
+
1: Input: $\{\eta^k\}_{k=0}^T > 0, x_0, e_i^0 = 0 \forall i \in [n]$
|
| 62 |
+
2: for $k = 0, 1, \ldots, T$ do
|
| 63 |
+
3: Parallel: Worker side
|
| 64 |
+
4: for $i = 1, \ldots, n$ do
|
| 65 |
+
5: obtain $g_i^k$
|
| 66 |
+
6: send $\Delta_i^k = \mathcal{C}^k (\eta^k g_i^k + e_i^k)$ to master
|
| 67 |
+
7: $e_i^{k+1} = \eta^k g_i^k + e_i^k - \Delta_i^k$
|
| 68 |
+
8: end for
|
| 69 |
+
9: Master side
|
| 70 |
+
10: aggregate $\Delta^k = \frac{1}{n} \sum_{i=1}^n \Delta_i^k$
|
| 71 |
+
11: broadcast $\Delta^k$ to each worker
|
| 72 |
+
12: Parallel: Worker side
|
| 73 |
+
13: for $i = 1, \ldots, n$ do
|
| 74 |
+
14: $x^{k+1} = x^k - \Delta^k$
|
| 75 |
+
15: end for
|
| 76 |
+
16: end for
|
| 77 |
+
|
| 78 |
+
- Experimental Validation. Finally, we provide an experimental evaluation on an array of classification tasks with CIFAR10 dataset corroborating our theoretical findings.
|
| 79 |
+
|
| 80 |
+
# 2 ERROR FEEDBACK IS NOT A GOOD IDEA WHEN USING UNBIASED COMPRESSORS
|
| 81 |
+
|
| 82 |
+
In this section we first introduce the notions of unbiased and general compression operators, and then compare Distributed Compressed SGD (DCSGD) without (Algorithm 1) and with (Algorithm 2) Error Feedback.
|
| 83 |
+
|
| 84 |
+
Unbiased vs General Compression Operators. We start with the definition of unbiased and general compression operators (Cordonnier, 2018; Stich et al., 2018; Koloskova et al., 2019).
|
| 85 |
+
|
| 86 |
+
Definition 1 (Unbiased Compression Operator). A randomized mapping $\mathcal{C} \colon \mathbb{R}^d \to \mathbb{R}^d$ is an unbiased compression operator (unbiased compressor) if there exists $\delta \geq 1$ such that
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\operatorname {E} \left[ \mathcal {C} (x) \right] = x, \quad \operatorname {E} \| \mathcal {C} (x) \| ^ {2} \leq \delta \| x \| ^ {2}, \quad \forall x \in \mathbb {R} ^ {d}. \tag {2}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
If this holds, we will for simplicity write $\mathcal{C} \in \mathbb{U}(\delta)$ .
|
| 93 |
+
|
| 94 |
+
Definition 2 (General Compression Operator). A (possibly) randomized mapping $\mathcal{C} \colon \mathbb{R}^d \to \mathbb{R}^d$ is a general compression operator (general compressor) if there exists $\lambda > 0$ and $\delta \geq 1$ such that
|
| 95 |
+
|
| 96 |
+
$$
|
| 97 |
+
\operatorname {E} \left[ \| \lambda \mathcal {C} (x) - x \| ^ {2} \right] \leq \left(1 - \frac {1}{\delta}\right) \| x \| ^ {2}, \quad \forall x \in \mathbb {R} ^ {d}. \tag {3}
|
| 98 |
+
$$
|
| 99 |
+
|
| 100 |
+
If this holds, we will for simplicity write $\mathcal{C} \in \mathbb{C}(\delta)$ .
|
| 101 |
+
|
| 102 |
+
The following lemma provides a link between these notions (see, e.g. Beznosikov et al. (2020)).
|
| 103 |
+
|
| 104 |
+
Lemma 1. If $\mathcal{C} \in \mathbb{U}(\delta)$ , then (3) holds with $\lambda = \frac{1}{\delta}$ , i.e., $\mathcal{C} \in \mathbb{C}(\delta)$ . That is, $\mathbb{U}(\delta) \subset \mathbb{C}(\delta)$ .
|
| 105 |
+
|
| 106 |
+
Note that the opposite inclusion to that established in the above lemma does not hold. For instance, the Top- $K$ operator belongs to $\mathbb{C}(\delta)$ , but does not belong to $\mathbb{U}(\delta)$ . In the next section we develop a procedure for transforming any mapping $\mathcal{C}:\mathbb{R}^d\to \mathbb{R}^d$ (and in particular, any general compressor) into a closely related induced unbiased compressor.
|
| 107 |
+
|
| 108 |
+
Distributed SGD with vs without Error Feedback. In the rest of this section, we compare the convergence rates for DCSGD (Algorithm 1) and DCSGD with EF (Algorithm 2). We do this comparison under standard assumptions (Karimi et al., 2016; Bottou et al., 2018; Necoara et al., 2019; Gower et al., 2019; Stich, 2019b; Stich & Karimireddy, 2020), listed next.
|
| 109 |
+
|
| 110 |
+
First, we assume throughout that $f$ has a unique minimizer $x^{\star}$ , and let $f^{\star} = f(x^{\star}) > -\infty$ .
|
| 111 |
+
|
| 112 |
+
Assumption 1 ( $\mu$ -quasi convexity). $f$ is $\mu$ -quasi convex, i.e.,
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
f ^ {\star} \geq f (x) + \left\langle \nabla f (x), x ^ {\star} - x \right\rangle + \frac {\mu}{2} \left\| x ^ {\star} - x \right\| ^ {2}, \quad \forall x \in \mathbb {R} ^ {d}. \tag {4}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
Assumption 2 (unbiased gradient oracle). The stochastic gradient used in Algorithms 1 and 2 satisfies
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\operatorname {E} \left[ g _ {i} ^ {k} \mid x ^ {k} \right] = \nabla f _ {i} \left(x ^ {k}\right), \quad \forall i, k. \tag {5}
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
Note that this assumption implies $\operatorname{E}\left[\frac{1}{n}\sum_{i = 1}^{n}g_i^k\mid x^k\right] = \nabla f(x^k)$ .
|
| 125 |
+
|
| 126 |
+
Assumption 3 $((L, \sigma^2)$ -expected smoothness). Function $f$ is $(L, \sigma^2)$ -smooth if there exist constants $L > 0$ and $\sigma^2 \geq 0$ such that $\forall i \in [n]$ and $\forall x^k \in \mathbb{R}^d$
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\mathrm {E} \left[ \left\| g _ {i} ^ {k} \right\| ^ {2} \right] \leq 2 L \left(f _ {i} \left(x ^ {k}\right) - f _ {i} ^ {\star}\right) + \sigma^ {2}, \tag {6}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\mathrm {E} \left[ \left\| \frac {1}{n} \sum_ {i = 1} ^ {n} g _ {i} ^ {k} \right\| ^ {2} \right] \leq 2 L \left(f \left(x ^ {k}\right) - f ^ {\star}\right) + \sigma^ {2} / n, \tag {7}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $f_{i}^{\star}$ is the minimum functional value of $f_{i}$ and $[n] = \{1,2,\dots ,n\}$ .
|
| 137 |
+
|
| 138 |
+
This assumption generalizes standard smoothness and boundedness of variance assumptions. For more details and discussion, see the works of Gower et al. (2019); Stich (2019b). Equipped with these assumptions, we are ready to proceed with the convergence theory.
|
| 139 |
+
|
| 140 |
+
Theorem 2 (Convergence of DCSGD). Consider the DCSGD algorithm with $n \geq 1$ nodes. Let Assumptions 1-3 hold and $\mathcal{C} \in \mathbb{U}(\delta)$ , where $\delta_n = \frac{\delta - 1}{n} + 1$ . Let $D := \frac{2L}{n}\sum_{i=1}^{n}(f_i(x^\star) - f_i^\star)$ . Then there exist stepsizes $\eta^k \leq \frac{1}{2\delta_nL}$ and weights $w^k \geq 0$ such that for all $T \geq 1$ we have
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\operatorname {E} \left[ f (\bar {x} ^ {T}) - f ^ {\star} \right] + \mu \operatorname {E} \left[ \left\| x ^ {T} - x ^ {\star} \right\| ^ {2} \right] \leq 6 4 \delta_ {n} L r ^ {0} \exp \left[ - \frac {\mu T}{4 \delta_ {n} L} \right] + 3 6 \frac {(\delta_ {n} - 1) D + \delta \sigma^ {2} / n}{\mu T},
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
where $r^0 = \left\| x^0 -x^\star \right\|^2$ , $W^{T} = \sum_{k = 0}^{T}w^{k}$ , and $\mathrm{Prob}(\bar{x}^T = x^k) = w^k /W^T$
|
| 147 |
+
|
| 148 |
+
If $\delta = 1$ (no compression), Theorem 2 recovers the optimal rate of Distributed SGD (Stich, 2019b). If $\delta > 1$ , there is an extra term $(\delta_n - 1)D$ in the convergence rate, which appears due to heterogeneity of data $(\sum_{i=1}^{n} \nabla f_i(x^{\star}) = 0$ , but $\sum_{i=1}^{n} \mathcal{C}(\nabla f_i(x^{\star})) \neq 0$ in general). In addition, the rate is negatively affected by extra variance due to presence of compression which leads to $L \to \delta_n L$ and $\sigma^2 / n \to \delta \sigma^2 / n$ .
|
| 149 |
+
|
| 150 |
+
Next we compare our rate to the best-known result for Error Feedback (Stich & Karimireddy, 2020) $(n = 1)$ , (Beznosikov et al., 2020) $(n \geq 1)$ used with $\mathcal{C} \in \mathbb{U}(\delta) \subset \mathbb{C}(\delta)$
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\operatorname {E} \left[ f (\bar {x} ^ {T}) - f ^ {\star} \right] = \tilde {\mathcal {O}} \left(\delta L r ^ {0} \exp \left[ - \frac {\mu T}{\delta L} \right] + \frac {\delta D + \sigma^ {2}}{\mu T}\right)
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
One can note several disadvantages of Error Feedback (Alg. 2) with respect to plain DCSGD (Alg. 1). The first major drawback is that the effect of compression $\delta$ is not reduced with an increasing number of nodes. Another disadvantage is that Theorem 2 implies convergence for both the functional values and the last iterate, rather than for functional values only as is the case for EF. On top of that, our rate of DCSGD as captured by Theorem 2 does not contain any hidden polylogarithmic factor compared to EF. Another practical advantage of DCSGD is that there is no need to store an extra vector for the error, which reduces the storage costs by a factor of two, making Algorithm 1 a viable choice for Deep Learning models with millions of parameters. Finally, one does not need to assume standard $L$ -smoothness in order to prove convergence in Theorem 2, while, on the other hand, $L$ -smoothness is an important building block for proving convergence for general compressors due to the presence of bias (Stich & Karimireddy, 2020; Beznosikov et al., 2020). The only term in which EF might outperform plain DCSGD is $\mathcal{O}(\sigma^2/\mu T)$ for which the corresponding term is $\mathcal{O}(\delta \sigma^2/n\mu T)$ . This is due to the fact that EF compensates for the error, while standard compression introduces extra variance. Note that this is not a major issue as it is reasonable to assume $\delta/n = \mathcal{O}(1)$ or, in addition, $\sigma^2 = 0$ if the weak growth condition holds (Vaswani et al., 2019), which is a quite standard assumption, or one can remove the effect of $\sigma^2$ by either computing the full gradient locally or by incorporating variance reduction such as SVRG (Johnson & Zhang, 2013). In Section 4, we also discuss how to remove the effect of $D$ in Theorem 2. Putting it all together, this suggests that standard DCSGD (Algorithm 1) is strongly preferable, in theory, to DCSGD with Error Feedback (Algorithm 2) for $\mathcal{C} \in \mathbb{U}(\delta)$ .
|
| 157 |
+
|
| 158 |
+
# 3 INDUCED COMPRESSOR: FIXING BIAS WITH ERROR-COMPRESSION
|
| 159 |
+
|
| 160 |
+
In the previous section, we showed that compressed DCSGD is theoretically preferable to DCSGD with Error Feedback for $\mathcal{C} \in \mathbb{U}(\delta)$ . Unfortunately, $\mathbb{C}(\delta) \not\subset \mathbb{U}(\delta)$ , an example being the Top- $K$ compressor (Alistarh et al., 2018; Stich et al., 2018). This compressor belongs to $\mathbb{C}\left(\frac{d}{K}\right)$ , but does not belong to $\mathbb{U}(\delta)$ for any $\delta$ . On the other hand, multiple unbiased alternatives to Top- $K$ have been proposed in the literature, including gradient sparsification (Wangni et al., 2018) and adaptive random sparsification (Beznosikov et al., 2020).
|
| 161 |
+
|
| 162 |
+
Induced Compressor. We now propose a general mechanism for constructing an unbiased compressor $\mathcal{C} \in \mathbb{U}$ from any biased compressor $\mathcal{C}_1 \in \mathbb{C}$ . We shall argue that it is preferable to use this induced compressor within DCSGD, in both theory and practice, to using the original biased compressor $\mathcal{C}_1$ within DCSGD + Error Feedback.
|
| 163 |
+
|
| 164 |
+
Theorem 3. For $\mathcal{C}_1\in \mathbb{C}(\delta_1)$ with $\lambda = 1$ , choose $\mathcal{C}_2\in \mathbb{U}(\delta_2)$ and define the induced compressor via
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
\mathcal {C} (x) := \mathcal {C} _ {1} (x) + \mathcal {C} _ {2} (x - \mathcal {C} _ {1} (x)).
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
The induced compression operator satisfies $\mathcal{C} \in \mathbb{U}(\delta)$ with $\delta = \delta_2(1 - 1 / \delta_1) + 1 / \delta_1$ .
|
| 171 |
+
|
| 172 |
+
To get some intuition about this procedure, recall the structure used in Error Feedback. The gradient estimator is first compressed with $\mathcal{C}_1(g)$ and the error $e = g - \mathcal{C}_1(g)$ is stored in memory and used to modify the gradient in the next iteration. In our proposed approach, instead of storing the error $e$ , we compress it with an unbiased compressor $\mathcal{C}_2$ (which can be seen as a parameter allowing flexibility in the design of the induced compressor) and communicate both of these compressed vectors. Note that this procedure results in extra variance as we do not work with the exact error, but with its unbiased estimate only. On the other hand, there is no bias and error accumulation that one needs to correct for. In addition, due to our construction, at least the same amount of information is sent to the master as in the case of plain $\mathcal{C}_1(g)$ : indeed, we send both $\mathcal{C}_1(g)$ and $\mathcal{C}_2(e)$ . The drawback of this is the necessity to send more bits. However, Theorem 3 provides the freedom in generating the induced compressor through the choice of the unbiased compressor $\mathcal{C}_2$ . In theory, it makes sense to choose $\mathcal{C}_2$ with similar compression factor to the compressor $\mathcal{C}_1$ we are transforming as this way the total number of communicated bits per iteration is preserved, up to the factor of two.
|
| 173 |
+
|
| 174 |
+
Remark: The $\operatorname{rtop}_{k_1,k_2}(x,y)$ operator proposed by Elibol et al. (2020) can be seen as a special case of our induced compressor with $x = y$ , $\mathcal{C}_1 = \operatorname{Top} - k_1$ and $\mathcal{C}_2 = \operatorname{Rand} - k_2$ .
|
| 175 |
+
|
| 176 |
+
Benefits of Induced Compressor. In the light of the results in Section 2, we argue that one should always prefer unbiased compressors to biased ones as long as their variances $\delta$ and communication complexities are the same, e.g., Rand- $K$ over Top- $K$ . In practice, biased/greedy compressors are in some settings observed to perform better due to their lower empirical variance (Beznosikov et al., 2020). These considerations give a practical significance to Theorem 3 as we demonstrate on the following example. Let us consider two compressors: one biased $\mathcal{C}_1 \in \mathbb{C}(\delta_1)$ and one unbiased $\mathcal{C}_2 \in \mathbb{U}(\delta_2)$ , such that $\delta_1 = \delta_2 = \delta$ , having identical communication complexity, e.g., Top- $K$ and Rand- $K$ . The induced compressor $\mathcal{C}(x) \coloneqq \mathcal{C}_1(x) + \mathcal{C}_2(x - \mathcal{C}_1(x))$ belongs to $\mathbb{U}(\delta_3)$ , where $\delta_3 = \delta - \left(1 - \frac{1}{\delta}\right) < \delta$ . While the size of the transmitted message is doubled, one can use Algorithm 1 since $\mathcal{C}$ is unbiased, which provides better convergence guarantees than Algorithm 2. Based on the construction of the induced compressor, one might expect that we need extra memory as "the error" $e = g - \mathcal{C}_1(g)$ needs to be stored, but during computation only. This is not an issue as compressors for DNNs are always applied layer-wise (Dutta et al., 2019), and hence the size of the extra memory is negligible. It does not help EF, as the error needs to be stored at any time for each layer.
|
| 177 |
+
|
| 178 |
+
# 4 EXTENSIONS
|
| 179 |
+
|
| 180 |
+
We now develop several extensions of Algorithm 1 relevant to distributed optimization in general, and to Federated Learning in particular. This is all possible due to the simplicity of our approach. Note that in the case of Error Feedback, these extensions have either not been obtained yet, or similarly to Section 2, the results are worse when compared to our derived bounds for unbiased compressors.
|
| 181 |
+
|
| 182 |
+
Partial Participation with Arbitrary Distribution over Nodes. In this section, we extend our results to a variant of DCSGD utilizing partial participation, which is of key relevance to Federated
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
Figure 1: Comparison of Top-1 (+ EF) and NU Rand-1 on Example 1 from Beznosikov et al. (2020).
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
|
| 189 |
+
Learning. In this framework, only a subset of all nodes communicates to the master node in each communication round. Such a framework was analyzed before, but only for the case of uniform subsampling (Sattler et al., 2019; Reisizadeh et al., 2020). In our work, we consider a more general partial participation framework: we assume that the subset of participating clients is determined by a fixed but otherwise arbitrary random set-valued mapping $\mathbb{S}$ (a "sampling") with values in $2^{[n]}$ , where $[n] = \{1, 2, \dots, n\}$ . To the best of our knowledge, this is the first partial participation result for FL where an arbitrary distribution over the nodes is considered. On the other hand, this is not the first work which makes use of the arbitrary sampling paradigm; this was used before in other contexts, e.g., for obtaining importance sampling guarantees for coordinate descent (Qu et al., 2015), primal-dual methods (Chambolle et al., 2018), and variance reduction (Horváth & Richtárik, 2019).
|
| 190 |
+
|
| 191 |
+
Note that the sampling $\mathbb{S}$ is uniquely defined by assigning probabilities to all $2^n$ subsets of $[n]$ . With each sampling $\mathbb{S}$ we associate a probability matrix $\mathbf{P} \in \mathbb{R}^{n \times n}$ defined by $\mathbf{P}_{ij} \coloneqq \mathrm{Prob}(\{i,j\} \subseteq \mathbb{S})$ . The probability vector associated with $\mathbb{S}$ is the vector composed of the diagonal entries of $\mathbf{P}$ : $p = (p_1, \ldots, p_n) \in \mathbb{R}^n$ , where $p_i \coloneqq \mathrm{Prob}(i \in \mathbb{S})$ . We say that $\mathbb{S}$ is proper if $p_i > 0$ for all $i$ . It is easy to show that $b \coloneqq \operatorname{E}[|\mathbb{S}|] = \operatorname{Trace}(\mathbf{P}) = \sum_{i=1}^{n} p_i$ , and hence $b$ can be seen as the expected number of clients participating in each communication round.
|
| 192 |
+
|
| 193 |
+
There are two algorithmic changes due to this extension: line 4 of Algorithm 1 does not iterate over every node, only over nodes $i \in S^k$ , where $S^k \sim \mathbb{S}$ , and the aggregation step in line 9 is adjusted to lead to an unbiased estimator of the gradient, which gives $\Delta_k = \sum_{i \in S^k} \frac{1}{np_i} \Delta_i^k$ .
|
| 194 |
+
|
| 195 |
+
To prove convergence, we exploit the following lemma.
|
| 196 |
+
|
| 197 |
+
Lemma 4 (Lemma 1, Horváth & Richtárik (2019)). Let $\zeta_1, \zeta_2, \ldots, \zeta_n$ be vectors in $\mathbb{R}^d$ and let $\bar{\zeta} := \frac{1}{n}\sum_{i=1}^{n}\zeta_i$ be their average. Let $\mathbb{S}$ be a proper sampling. Then there exists $v \in \mathbb{R}^n$ such that
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\mathbf{P} - p p^{\top} \preceq \operatorname{Diag}\left(p_1 v_1, p_2 v_2, \dots, p_n v_n\right). \tag{8}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
Moreover, if $S\sim \mathbb{S}$ ,then
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
\operatorname{E}\left[\left\|\sum_{i \in S}\frac{\zeta_i}{n p_i} - \bar{\zeta}\right\|^2\right] \leq \frac{1}{n^2}\sum_{i=1}^{n}\frac{v_i}{p_i}\|\zeta_i\|^2. \tag{9}
|
| 207 |
+
$$
|
| 208 |
+
|
| 209 |
+
The following theorem establishes the convergence rate for Algorithm 1 with partial participation.
|
| 210 |
+
|
| 211 |
+
Theorem 5. Let Assumptions 1-3 hold and $\mathcal{C} \in \mathbb{U}(\delta)$ , then there exist step sizes $\eta^k \leq \frac{1}{2\delta_{\mathbb{S}}L}$ and weights $w^k \geq 0$ such that
|
| 212 |
+
|
| 213 |
+
$$
|
| 214 |
+
\mathrm{E}\left[f(\bar{x}^T) - f^{\star}\right] + \mu \mathrm{E}\left[\|x^T - x^{\star}\|^2\right] \leq 64\,\delta_{\mathbb{S}} L r^0 \exp\left[-\frac{\mu T}{4\delta_{\mathbb{S}} L}\right] + 36\,\frac{\left(\delta_{\mathbb{S}} - 1\right) D + (1 + a_{\mathbb{S}})\,\delta\sigma^2 / n}{\mu T},
|
| 215 |
+
$$
|
| 216 |
+
|
| 217 |
+
where $r^0, W^T, \bar{x}^T$ , and $D$ are defined in Theorem 2, $a_{\mathbb{S}} = \max_{i \in [n]} \left\{ \frac{v_i}{p_i} \right\}$ , and $\delta_{\mathbb{S}} = \frac{\delta a_{\mathbb{S}} + (\delta - 1)}{n} + 1$ .
|
| 218 |
+
|
| 219 |
+
For the case $\mathbb{S} = [n]$ with probability 1, one can show that Lemma 4 holds with $v = 0$ , and hence we exactly recover the results of Theorem 2. In addition, we can quantify the slowdown factor with respect to full participation regime (Theorem 2), which is $\delta \max_{i\in [n]}\frac{v_i}{p_i}$ . While in our framework we assume the distribution $\mathbb{S}$ to be fixed, it can be easily extended to several proper distributions $\mathbb{S}_j$ 's or we can even handle a block-cyclic structure with each block having an arbitrary proper distribution $\mathbb{S}_j$ over the given block $j$ combining our analysis with the results of Eichner et al. (2019).
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
|
| 225 |
+

|
| 226 |
+
Figure 2: Algorithm 1 vs. Algorithm 2 on CIFAR10 with ResNet18 (bottom), VGG11 (top) and TernGrad as a compression.
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
|
| 230 |
+
Obtaining Linear Convergence. Note that in all the previous theorems, we can only guarantee a sublinear $\mathcal{O}(1 / T)$ convergence rate. Linear rate is obtained in the special case when $D = 0$ and $\sigma^2 = 0$ . The first condition is satisfied when $f_{i}^{\star} = f_{i}(x^{\star})$ for all $i\in [n]$ , thus when $x^{\star}$ is also a minimizer of every local function $f_{i}$ . Furthermore, the effect of $D$ can be removed using compression of gradient differences, as pioneered in the DIANA algorithm (Mishchenko et al., 2019a). Note that $\sigma^2 = 0$ if the weak growth condition holds (Vaswani et al., 2019). Moreover, one can remove the effect of $\sigma^2$ by either computing full gradients locally or by incorporating variance reduction such as SVRG (Johnson & Zhang, 2013). It was shown by Horváth et al. (2019b) that both $\sigma^2$ and $D$ can be removed for the setting of Theorem 2. These results can be easily extended to partial participation using our proof technique for Theorem 5. Note that this reduction is not possible for Error Feedback as the analysis of the DIANA algorithm is heavily dependent on the unbiasedness property. This points to another advantage of the induced compressor framework introduced in Section 3.
|
| 231 |
+
|
| 232 |
+
Acceleration. We now comment on the combination of compression and acceleration/momentum. This setting is very important to consider as essentially all state-of-the-art methods for training deep learning models, including Adam (Kingma & Ba, 2015; Reddi et al., 2018), rely on the use of momentum in one form or another. One can treat the unbiased compressed gradient as a stochastic gradient (Gorbunov et al., 2020) and the theory for momentum SGD (Yang et al., 2016; Gadat et al., 2018; Loizou & Richtárik, 2017) would be applicable with an extra smoothness assumption. Moreover, it is possible to remove the variance caused by stochasticity and obtain linear convergence with an accelerated rate, which leads to the Accelerated DIANA method (Li et al., 2020). Similarly to our previous discussion, both of these techniques are heavily dependent on the unbiasedness property. It is an intriguing question, but out of the scope of the paper, to investigate the combined effect of momentum and Error Feedback and see whether these techniques are compatible theoretically.
|
| 233 |
+
|
| 234 |
+
# 5 EXPERIMENTS
|
| 235 |
+
|
| 236 |
+
In this section, we compare Algorithms 1 and 2 for several compression operators. If the method contains “+ EF”, it means that EF is applied, thus Algorithm 2 is applied. Otherwise, Algorithm 1 is displayed. To be fair, we always compare methods with the same communication complexity per iteration. All experimental details can be found in the Appendix.
|
| 237 |
+
|
| 238 |
+
Failure of DCSGD with biased Top-1. In this experiment, we present the example considered in Beznosikov et al. (2020), which was used as a counterexample to show that some form of error correction is needed in order for biased compressors to work/provably converge. In addition, we run experiments on their construction and show that while Error Feedback fixes divergence, it is still significantly dominated by unbiased non-uniform sparsification (NU Rand-1), which works by only
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 3: Comparison of different sparsification techniques with and without usage of Error Feedback on CIFAR10 with Resnet18 (top) and VGG11 (bottom). $K = 5\% * d$ , for Induced compressor $C_1$ is Top- $K/2$ and $C_2$ is Rand- $K/2$ (Wangni et al.).
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
|
| 249 |
+
keeping one non-zero coordinate, with coordinate $i$ sampled with probability equal to $|x_i| / \sum_{j=1}^{d} |x_j|$ , where $|x_j|$ denotes the absolute value of the $j$ -th coordinate, as can be seen in Figure 1. The details can be found in the Appendix.
|
| 250 |
+
|
| 251 |
+
Error Feedback for Unbiased Compression Operators. In our second experiment, we compare the effect of Error Feedback in the case when an unbiased compressor is used. Note that unbiased compressors are theoretically guaranteed to work both with Algorithm 1 and 2. We can see from Figure 2 that adding Error Feedback can hurt the performance; we use TernGrad (Wen et al., 2017) (coincides with QSGD (Alistarh et al., 2016) and natural dithering (Horváth et al., 2019a) with the infinity norm and one level) as compressors. This agrees with our theoretical findings. In addition, for sparsification techniques such as Random Sparsification or Gradient Sparsification (Wangni et al., 2018), we observed that when sparsity is set to be $10\%$ , Algorithm 1 converges for all the selected values of step-sizes, but Algorithm 2 diverges and a smaller step-size needs to be used. This is an important observation as many practical works (Li et al., 2014; Wei et al., 2015; Aji & Heafield, 2017; Hsieh et al., 2017; Lin et al., 2018b; Lim et al., 2018) use sparsification techniques mentioned in this section, but proposed to use EF, while our work shows that using unbiasedness property leads not only to better convergence but also to memory savings.
|
| 252 |
+
|
| 253 |
+
Unbiased Alternatives to Biased Compression. In this section, we investigate candidates for unbiased compressors that can compete with Top- $K$ , one of the most frequently used compressors. Theoretically, Top- $K$ is not guaranteed to work by itself and might lead to divergence (Beznosikov et al., 2020) unless Error Feedback is applied. One would usually compare the performance of Top- $K$ with EF to Rand- $K$ , which keeps $K$ randomly selected coordinates and then scales the output by $d / K$ to preserve unbiasedness. Rather than naively comparing to Rand- $K$ , we propose to use more nuanced unbiased approaches. The first one is Gradient Sparsification proposed by Wangni et al. (Wangni et al., 2018), which we refer to here as Rand- $K$ (Wangni et al.), where the probability of keeping each coordinate scales with its magnitude and communication budget. As the second alternative, we propose to use our induced compressor, where $C_1$ is Top- $a$ and unbiased part $C_2$ is Rand- $(K - a)$ (Wangni et al.) with communication budget $K - a$ . It should be noted that $a$ can be considered as a hyperparameter to tune. For our experiment, we chose it to be $K / 2$ for simplicity. Figure 3 suggests that our induced compressor outperforms all of its competitors as can be seen for both VGG11 and Resnet18. Moreover, the induced compressor as well as Rand- $K$ do not require extra memory to store the error vector. Finally, Top- $K$ without EF suffers a significant decrease in performance, which stresses the necessity of error correction.
|
| 254 |
+
|
| 255 |
+
# 6 CONCLUSION
|
| 256 |
+
|
| 257 |
+
In this paper, we argue that if compressed communication is required for distributed training due to communication overhead, it is better to use unbiased compressors. We show that this leads to strictly better convergence guarantees with fewer assumptions. In addition, we propose a new construction for transforming any compressor into an unbiased one using a compressed EF-like approach. Besides theoretical superiority, usage of unbiased compressors enjoys lower memory requirements. Our theoretical findings are corroborated with empirical evaluation.
|
| 258 |
+
|
| 259 |
+
As a future work we plan to investigate the question of the appropriate choice of the inducing compressor $\mathcal{C}$ . Our preliminary studies show that there is much to be discovered here, both in theory and in terms of developing further practical guidelines to those already contained in this work. The question of (theoretically) optimizing for $\mathcal{C}_1$ and $\mathcal{C}_2$ is difficult, as it necessitates a deeper theoretical understanding of biased compressors, which is currently missing. An alternative is to impose some assumptions on the structure of gradients encountered during the iterative process, or to perform an extensive experimental evaluation on desired tasks to provide guidelines for practitioners.
|
| 260 |
+
|
| 261 |
+
# REFERENCES
|
| 262 |
+
|
| 263 |
+
Alham Fikri Aji and Kenneth Heafield. Sparse communication for distributed gradient descent. Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, 2017.
|
| 264 |
+
Dan Alistarh, Jerry Li, Ryota Tomioka, and Milan Vojnovic. QSGD: Randomized quantization for communication-optimal stochastic gradient descent. arXiv preprint arXiv:1610.02132, 2016.
|
| 265 |
+
Dan Alistarh, Torsten Hoefler, Mikael Johansson, Nikola Konstantinov, Sarit Khirirat, and Cédric Renggli. The convergence of sparsified gradient methods. In Advances in Neural Information Processing Systems, pp. 5973-5983, 2018.
|
| 266 |
+
Debraj Basu, Deepesh Data, Can Karakus, and Suhas Diggavi. Qsparse-local-SGD: Distributed SGD with quantization, sparsification and local computations. In Advances in Neural Information Processing Systems, pp. 14668-14679, 2019.
|
| 267 |
+
Aleksandr Beznosikov, Samuel Horvath, Peter Richtárik, and Mher Safaryan. On biased compression for distributed learning. arXiv preprint arXiv:2002.12410, 2020.
|
| 268 |
+
Léon Bottou, Frank E Curtis, and Jorge Nocedal. Optimization methods for large-scale machine learning. Siam Review, 60(2):223-311, 2018.
|
| 269 |
+
Antonin Chambolle, Matthias J Ehrhardt, Peter Richtárik, and Carola-Bibiane Schonlieb. Stochastic primal-dual hybrid gradient algorithm with arbitrary sampling and imaging applications. SIAM Journal on Optimization, 28(4):2783-2808, 2018.
|
| 270 |
+
Jean-Baptiste Cordonnier. Convex optimization using sparsified stochastic gradient descent with memory. Technical report, 2018.
|
| 271 |
+
Aritra Dutta, El Houcine Bergou, Ahmed M Abdelmoniem, Chen-Yu Ho, Atal Narayan Sahu, Marco Canini, and Panos Kalnis. On the discrepancy between the theoretical analysis and practical implementations of compressed communication for distributed deep learning. arXiv preprint arXiv:1911.08250, 2019.
|
| 272 |
+
Hubert Eichner, Tomer Koren, H Brendan McMahan, Nathan Srebro, and Kunal Talwar. Semi-cyclic stochastic gradient descent. arXiv preprint arXiv:1904.10120, 2019.
|
| 273 |
+
Melih Elibol, Lihua Lei, and Michael I Jordan. Variance reduction with sparse gradients. arXiv preprint arXiv:2001.09623, 2020.
|
| 274 |
+
Sébastien Gadat, Fabien Panloup, Sofiane Saadane, et al. Stochastic heavy ball. Electronic Journal of Statistics, 12(1):461-529, 2018.
|
| 275 |
+
WM Goodall. Television by pulse code modulation. Bell System Technical Journal, 30(1):33-49, 1951.
|
| 276 |
+
|
| 277 |
+
Eduard Gorbunov, Filip Hanzely, and Peter Richtárik. A unified theory of sgd: Variance reduction, sampling, quantization and coordinate descent. In The 23rd International Conference on Artificial Intelligence and Statistics, 2020.
|
| 278 |
+
Robert Mansel Gower, Nicolas Loizou, Xun Qian, Alibek Sailanbayev, Egor Shulgin, and Peter Richtárik. SGD: General analysis and improved rates. Proceedings of the 36th International Conference on Machine Learning, Long Beach, California, 2019.
|
| 279 |
+
Benjamin Grimmer. Convergence rates for deterministic and stochastic subgradient methods without Lipschitz continuity. SIAM Journal on Optimization, 29(2):1350-1365, 2019.
|
| 280 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.
|
| 281 |
+
Samuel Horváth and Peter Richtárik. Nonconvex variance reduced optimization with arbitrary sampling. Proceedings of the 36th International Conference on Machine Learning, 2019.
|
| 282 |
+
Samuel Horváth, Chen-Yu Ho, L'udovit Horváth, Atal Narayan Sahu, Marco Canini, and Peter Richtárik. Natural compression for distributed deep learning. arXiv preprint arXiv:1905.10988, 2019a.
|
| 283 |
+
Samuel Horváth, Dmitry Kovalev, Konstantin Mishchenko, Sebastian Stich, and Peter Richtárik. Stochastic distributed learning with gradient quantization and variance reduction. arXiv preprint arXiv:1904.05115, 2019b.
|
| 284 |
+
Kevin Hsieh, Aaron Harlap, Nandita Vijaykumar, Dimitris Konomis, Gregory R Ganger, Phillip B Gibbons, and Onur Mutlu. Gaia: Geo-distributed machine learning approaching LAN speeds. In 14th Symposium on Networked Systems Design and Implementation, pp. 629-647, 2017.
|
| 285 |
+
Rie Johnson and Tong Zhang. Accelerating stochastic gradient descent using predictive variance reduction. In Advances in neural information processing systems, pp. 315-323, 2013.
|
| 286 |
+
Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Keith Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. arXiv preprint arXiv:1912.04977, 2019.
|
| 287 |
+
Hamed Karimi, Julie Nutini, and Mark Schmidt. Linear convergence of gradient and proximal-gradient methods under the polyak-łojasiewicz condition. In Joint European Conference on Machine Learning and Knowledge Discovery in Databases, pp. 795-811. Springer, 2016.
|
| 288 |
+
Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank J Reddi, Sebastian U Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for on-device federated learning. arXiv preprint arXiv:1910.06378, 2019a.
|
| 289 |
+
Sai Praneeth Karimireddy, Quentin Rebjock, Sebastian U Stich, and Martin Jaggi. Error feedback fixes signSGD and other gradient compression schemes. arXiv preprint arXiv:1901.09847, 2019b.
|
| 290 |
+
Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Tighter theory for local SGD on identical and heterogeneous data. In The 23rd International Conference on Artificial Intelligence and Statistics (AISTATS 2020), 2020.
|
| 291 |
+
Sarit Khirirat, Hamid Reza Feyzmahdavian, and Mikael Johansson. Distributed learning with compressed gradients. arXiv preprint arXiv:1806.06573, 2018.
|
| 292 |
+
Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. Published as a conference paper at the 3rd International Conference for Learning Representations, San Diego, 2015.
|
| 293 |
+
Anastasia Koloskova, Sebastian U Stich, and Martin Jaggi. Decentralized stochastic optimization and gossip algorithms with compressed communication. arXiv preprint arXiv:1902.00340, 2019.
|
| 294 |
+
Jakub Konečny and Peter Richtárik. Randomized distributed mean estimation: Accuracy vs. communication. Frontiers in Applied Mathematics and Statistics, 4:62, 2018.
|
| 295 |
+
|
| 296 |
+
Simon Lacoste-Julien, Mark Schmidt, and Francis Bach. A simpler approach to obtaining an $\mathcal{O}(1 / t)$ convergence rate for the projected stochastic subgradient method. arXiv preprint arXiv:1212.2002, 2012.
|
| 297 |
+
Mu Li, David G Andersen, Jun Woo Park, Alexander J Smola, Amr Ahmed, Vanja Josifovski, James Long, Eugene J Shekita, and Bor-Yiing Su. Scaling distributed machine learning with the parameter server. In 11th {USENIX} Symposium on Operating Systems Design and Implementation ( {OSDI} 14), pp. 583-598, 2014.
|
| 298 |
+
Zhize Li, Dmitry Kovalev, Xun Qian, and Peter Richtárik. Acceleration for compressed gradient descent in distributed and federated optimization. arXiv preprint arXiv:2002.11364, 2020.
|
| 299 |
+
Hyeontaek Lim, David G Andersen, and Michael Kaminsky. 3LC: Lightweight and effective traffic compression for distributed machine learning. arXiv preprint arXiv:1802.07389, 2018.
|
| 300 |
+
Tao Lin, Sebastian U Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use local SGD. arXiv preprint arXiv:1808.07217, 2018a.
|
| 301 |
+
Yujun Lin, Song Han, Huizi Mao, Yu Wang, and William J Dally. Deep gradient compression: Reducing the communication bandwidth for distributed training. *ICLR 2018 - International Conference on Learning Representations*, 2018b.
|
| 302 |
+
Nicolas Loizou and Peter Richtárik. Momentum and stochastic momentum for stochastic gradient, Newton, proximal point and subspace descent methods. arXiv preprint arXiv:1712.09677, 2017.
|
| 303 |
+
Konstantin Mishchenko, Eduard Gorbunov, Martin Takáč, and Peter Richtárik. Distributed learning with compressed gradient differences. arXiv preprint arXiv:1901.09269, 2019a.
|
| 304 |
+
Konstantin Mishchenko, Filip Hanzely, and Peter Richtárik. $99\%$ of parallel optimization is inevitably a waste of time. arXiv preprint arXiv:1901.09437, 2019b.
|
| 305 |
+
Ion Necoara, Yu Nesterov, and Francois Glineur. Linear convergence of first order methods for non-strongly convex optimization. Mathematical Programming, 175(1-2):69-107, 2019.
|
| 306 |
+
Zheng Qu, Peter Richtárik, and Tong Zhang. Quartz: Randomized dual coordinate ascent with arbitrary sampling. In Advances in Neural Information Processing Systems, pp. 865-873, 2015.
|
| 307 |
+
Ali Ramezani-Kebrya, Fartash Faghri, and Daniel M Roy. NUQSGD: Improved communication efficiency for data-parallel SGD via nonuniform quantization. arXiv preprint arXiv:1908.06077, 2019.
|
| 308 |
+
Sashank J Reddi, Satyen Kale, and Sanjiv Kumar. On the convergence of Adam and beyond. *ICLR 2018 - International Conference on Learning Representations*, 2018.
|
| 309 |
+
Amirhossein Reisizadeh, Aryan Mokhtari, Hamed Hassani, Ali Jadbabaie, and Ramtin Pedarsani. Fedpaq: A communication-efficient federated learning method with periodic averaging and quantization. In International Conference on Artificial Intelligence and Statistics, pp. 2021-2031, 2020.
|
| 310 |
+
Peter Richtárik and Martin Takáč. Parallel coordinate descent methods for big data optimization. Mathematical Programming, 156(1-2):433-484, 2016.
|
| 311 |
+
Lawrence Roberts. Picture coding using pseudo-random noise. IRE Transactions on Information Theory, 8(2):145-154, 1962.
|
| 312 |
+
Felix Sattler, Simon Wiedemann, Klaus-Robert Müller, and Wojciech Samek. Robust and communication-efficient federated learning from non-iid data. IEEE transactions on neural networks and learning systems, 2019.
|
| 313 |
+
Frank Seide, Hao Fu, Jasha Droppo, Gang Li, and Dong Yu. 1-bit stochastic gradient descent and its application to data-parallel distributed training of speech dnns. In *Fifteenth Annual Conference of the International Speech Communication Association*, 2014.
|
| 314 |
+
Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. *ICLR 2015 - International Conference on Learning Representations*, 2015.
|
| 315 |
+
|
| 316 |
+
Sebastian U Stich. Local SGD converges fast and communicates little. ICLR 2019 - International Conference on Learning Representations, 2019a.
|
| 317 |
+
Sebastian U Stich. Unified optimal analysis of the (stochastic) gradient method. arXiv preprint arXiv:1907.04232, 2019b.
|
| 318 |
+
Sebastian U Stich and Sai Praneeth Karimireddy. The error-feedback framework: Better rates for SGD with delayed gradients and compressed communication. *ICLR* 2020 - International Conference on Learning Representations, 2020.
|
| 319 |
+
Sebastian U Stich, Jean-Baptiste Cordonnier, and Martin Jaggi. Sparsified SGD with memory. In Advances in Neural Information Processing Systems, pp. 4447-4458, 2018.
|
| 320 |
+
Sharan Vaswani, Francis Bach, and Mark Schmidt. Fast and faster convergence of sgd for overparameterized models and an accelerated perceptron. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 1195-1204, 2019.
|
| 321 |
+
Thijs Vogels, Sai Praneeth Karimireddy, and Martin Jaggi. PowerSGD: Practical low-rank gradient compression for distributed optimization. In Advances in Neural Information Processing Systems, pp. 14236-14245, 2019.
|
| 322 |
+
Jianqiao Wangni, Jialei Wang, Ji Liu, and Tong Zhang. Gradient sparsification for communication-efficient distributed optimization. In Advances in Neural Information Processing Systems, pp. 1299-1309, 2018.
|
| 323 |
+
Jinliang Wei, Wei Dai, Aurick Qiao, Qirong Ho, Henggang Cui, Gregory R Ganger, Phillip B Gibbons, Garth A Gibson, and Eric P Xing. Managed communication and consistency for fast data-parallel iterative analytics. In Proceedings of the Sixth ACM Symposium on Cloud Computing, pp. 381-394, 2015.
|
| 324 |
+
Wei Wen, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. Terngrad: Ternary gradients to reduce communication in distributed deep learning. In Advances in Neural Information Processing Systems, pp. 1509-1519, 2017.
|
| 325 |
+
Blake Woodworth, Kumar Kshitij Patel, Sebastian U Stich, Zhen Dai, Brian Bullins, H Brendan McMahan, Ohad Shamir, and Nathan Srebro. Is local SGD better than minibatch SGD? arXiv preprint arXiv:2002.07839, 2020.
|
| 326 |
+
Tianbao Yang, Qihang Lin, and Zhe Li. Unified convergence analysis of stochastic momentum methods for convex and non-convex optimization. arXiv preprint arXiv:1604.03257, 2016.
|
| 327 |
+
Hantian Zhang, Jerry Li, Kaan Kara, Dan Alistarh, Ji Liu, and Ce Zhang. Zipml: Training linear models with end-to-end low precision, and a little bit of deep learning. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 4035-4043. JMLR.org, 2017.
|
| 328 |
+
|
| 329 |
+

|
| 330 |
+
|
| 331 |
+

|
| 332 |
+
|
| 333 |
+

|
| 334 |
+
Figure 4: Algorithm 1 vs. Algorithm 2 on CIFAR10 with ResNet18 (bottom), VGG11 (top) and TernGrad as a compression.
|
| 335 |
+
|
| 336 |
+

|
| 337 |
+
|
| 338 |
+
# APPENDIX
|
| 339 |
+
|
| 340 |
+
# A EXPERIMENTAL DETAILS
|
| 341 |
+
|
| 342 |
+
To be fair, we always compare methods with the same communication complexity per iteration. We report the number of epochs (passes over the dataset) with respect to training loss and testing accuracy. The test accuracy is obtained by evaluating the best model in terms of validation accuracy. A validation accuracy is computed based on $10\%$ randomly selected training data. We tune the step-size using based on the training loss. For every experiment, we randomly distributed the training dataset among 8 workers; each worker computes its local gradient-based on its own dataset. We used a local batch size of 32. All the provided figures display the mean performance with one standard error over 5 independent runs. For a fair comparison, we use the same random seed for the compared methods. Our experimental results are based on a Python implementation of all the methods running in PyTorch. All reported quantities are independent of the system architecture and network bandwidth.
|
| 343 |
+
|
| 344 |
+
Dataset and Models. We do an evaluation on CIFAR10 dataset. We consider VGG11 (Simonyan & Zisserman, 2015) and ResNet18 (He et al., 2016) models and step-sizes 0.1, 0.05 and 0.01.
|
| 345 |
+
|
| 346 |
+
# A.1 EXTRA EXPERIMENTS
|
| 347 |
+
|
| 348 |
+
Momentum. In this extra experiment, we look at the effect of momentum on Algorithm 1 and 2. We set momentum to 0.9. Similarly to Figure 2, we work with the unbiased compressor, concretely TernGrad (Wen et al., 2017) (coincides with QSGD (Alistarh et al., 2016) and natural dithering (Horváth et al., 2019a) with the infinity norm and one level), to see the effect of adding Error Feedback. We can see from Figure 4 that adding Error Feedback can hurt the performance, which agrees with our theoretical findings.
|
| 349 |
+
|
| 350 |
+
# B EXAMPLE 1, BEZNOSIKOV ET AL. (2020)
|
| 351 |
+
|
| 352 |
+
In this section, we present the example considered in Beznosikov et al. (2020), which was used as a counterexample to show that some form of error correction is needed in order for biased compressors to work/provably converge. In addition, we run experiments on their construction and show that while Error Feedback fixes divergence, it is still significantly dominated by unbiased non-uniform sparsification as can be seen in Figure 1. The construction follows.
|
| 353 |
+
|
| 354 |
+
Consider $n = d = 3$ and define the following smooth and strongly convex quadratic functions
|
| 355 |
+
|
| 356 |
+
$$
|
| 357 |
+
f _ {1} (x) = \langle a, x \rangle^ {2} + \frac {1}{4} \| x \| ^ {2}, \qquad f _ {2} (x) = \langle b, x \rangle^ {2} + \frac {1}{4} \| x \| ^ {2}, \qquad f _ {3} (x) = \langle c, x \rangle^ {2} + \frac {1}{4} \| x \| ^ {2},
|
| 358 |
+
$$
|
| 359 |
+
|
| 360 |
+
where $a = (-3,2,2), b = (2, -3,2), c = (2,2, -3)$ . Then, with the initial point $x^0 = (t,t,t)$ , $t > 0$
|
| 361 |
+
|
| 362 |
+
$$
|
| 363 |
+
\nabla f _ {1} (x ^ {0}) = \frac {t}{2} (-11, 9, 9), \qquad \nabla f _ {2} (x ^ {0}) = \frac {t}{2} (9, -11, 9), \qquad \nabla f _ {3} (x ^ {0}) = \frac {t}{2} (9, 9, -11).
|
| 364 |
+
$$
|
| 365 |
+
|
| 366 |
+
Using the Top-1 compressor, we get
|
| 367 |
+
|
| 368 |
+
$$
|
| 369 |
+
\mathcal {C} (\nabla f _ {1} (x ^ {0})) = \frac {t}{2} (-11, 0, 0), \quad \mathcal {C} (\nabla f _ {2} (x ^ {0})) = \frac {t}{2} (0, -11, 0), \quad \mathcal {C} (\nabla f _ {3} (x ^ {0})) = \frac {t}{2} (0, 0, -11).
|
| 370 |
+
$$
|
| 371 |
+
|
| 372 |
+
The next iterate of DCGD is
|
| 373 |
+
|
| 374 |
+
$$
|
| 375 |
+
x ^ {1} = x ^ {0} - \frac {\eta}{3} \sum_ {i = 1} ^ {3} \mathcal {C} (\nabla f _ {i} (x ^ {0})) = \left(1 + \frac {11 \eta}{6}\right) x ^ {0}.
|
| 376 |
+
$$
|
| 377 |
+
|
| 378 |
+
Repeated application gives $x^{k} = \left(1 + \frac{11\eta}{6}\right)^{k}x^{0}$ , which diverges exponentially fast to $+\infty$ since $\eta > 0$ .
|
| 379 |
+
|
| 380 |
+
As an initial point, we use $(1,1,1)^{\top}$ in our experiments and we choose step size $\frac{1}{L}$ , where $L$ is the smoothness parameter of $f = \frac{1}{3} (f_1 + f_2 + f_3)$ . Note that the zero vector is the unique minimizer of $f$ .
|
| 381 |
+
|
| 382 |
+
# C PROOFS
|
| 383 |
+
|
| 384 |
+
# C.1 PROOF OF LEMMA 1
|
| 385 |
+
|
| 386 |
+
We follow (2), which holds for $\mathcal{C} \in \mathbb{U}(\delta)$ .
|
| 387 |
+
|
| 388 |
+
$$
|
| 389 |
+
\begin{array}{l} \operatorname {E} \left[ \left\| \frac {1}{\delta} \mathcal {C} ^ {k} (x) - x \right\| ^ {2} \right] = \frac {1}{\delta^ {2}} \operatorname {E} \left[ \left\| \mathcal {C} ^ {k} (x) \right\| ^ {2} \right] - 2 \frac {1}{\delta} \left\langle \operatorname {E} \left[ \mathcal {C} ^ {k} (x) \right], x \right\rangle + \| x \| ^ {2} \\ \leq \left(\frac {1}{\delta} - \frac {2}{\delta} + 1\right) \| x \| ^ {2} \\ = \left(1 - \frac {1}{\delta}\right) \| x \| ^ {2}, \\ \end{array}
|
| 390 |
+
$$
|
| 391 |
+
|
| 392 |
+
which concludes the proof.
|
| 393 |
+
|
| 394 |
+
# C.2 PROOF OF THEOREM 2
|
| 395 |
+
|
| 396 |
+
We use the update of Algorithm 1 to bound the following quantity
|
| 397 |
+
|
| 398 |
+
$$
|
| 399 |
+
\mathrm {E} \left[ \left\| x ^ {k + 1} - x ^ {\star} \right\| ^ {2} | x ^ {k} \right] = \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - \frac {2 \eta^ {k}}{n} \sum_ {i = 1} ^ {n} \mathrm {E} \left[ \left\langle \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right), x ^ {k} - x ^ {\star} \right\rangle | x ^ {k} \right] +
|
| 400 |
+
$$
|
| 401 |
+
|
| 402 |
+
$$
|
| 403 |
+
\left(\frac {\eta^ {k}}{n}\right) ^ {2} \mathrm {E} \left[ \left\| \sum_ {i = 1} ^ {n} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right) \right\| ^ {2} | x ^ {k} \right]
|
| 404 |
+
$$
|
| 405 |
+
|
| 406 |
+
$$
|
| 407 |
+
\stackrel {(2) + (5)} {\leq} \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left\langle \nabla f (x ^ {k}), x ^ {k} - x ^ {\star} \right\rangle +
|
| 408 |
+
$$
|
| 409 |
+
|
| 410 |
+
$$
|
| 411 |
+
\frac {(\eta^ {k}) ^ {2}}{n ^ {2}} \mathrm {E} \left[ \sum_ {i = 1} ^ {n} \left| \left| \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right) - g _ {i} ^ {k} \right| \right| ^ {2} + \left\| \sum_ {i = 1} ^ {n} g _ {i} ^ {k} \right\| ^ {2} | x ^ {k} \right]
|
| 412 |
+
$$
|
| 413 |
+
|
| 414 |
+
$$
|
| 415 |
+
\stackrel {(2)} {\leq} \quad \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left\langle \nabla f (x ^ {k}), x ^ {k} - x ^ {\star} \right\rangle +
|
| 416 |
+
$$
|
| 417 |
+
|
| 418 |
+
$$
|
| 419 |
+
\frac {(\eta^ {k}) ^ {2}}{n ^ {2}} \mathrm {E} \left[ (\delta - 1) \sum_ {i = 1} ^ {n} \left\| g _ {i} ^ {k} \right\| ^ {2} + \left\| \sum_ {i = 1} ^ {n} g _ {i} ^ {k} \right\| ^ {2} | x ^ {k} \right]
|
| 420 |
+
$$
|
| 421 |
+
|
| 422 |
+
$$
|
| 423 |
+
\stackrel {(6) + (7)} {\leq} \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left\langle \nabla f (x ^ {k}), x ^ {k} - x ^ {\star} \right\rangle +
|
| 424 |
+
$$
|
| 425 |
+
|
| 426 |
+
$$
|
| 427 |
+
2 L (\eta^ {k}) ^ {2} \left(\delta_ {n} (f (x ^ {k}) - f ^ {\star}) + (\delta_ {n} - 1) \frac {1}{n} \sum_ {i = 1} ^ {n} (f _ {i} (x ^ {\star}) - f _ {i} ^ {\star})\right) + (\eta^ {k}) ^ {2} \frac {\delta \sigma^ {2}}{n}
|
| 428 |
+
$$
|
| 429 |
+
|
| 430 |
+
$$
|
| 431 |
+
\stackrel {(4)} {\leq} \quad \left(1 - \mu \eta^ {k}\right) \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left(1 - \eta^ {k} \delta_ {n} L\right) \left(f (x ^ {k}) - f ^ {\star}\right) +
|
| 432 |
+
$$
|
| 433 |
+
|
| 434 |
+
$$
|
| 435 |
+
\left(\eta^ {k}\right) ^ {2} \left(\left(\delta_ {n} - 1\right) D + \frac {\delta \sigma^ {2}}{n}\right).
|
| 436 |
+
$$
|
| 437 |
+
|
| 438 |
+
Taking full expectation and $\eta^k\leq \frac{1}{2\delta_nL}$ , we obtain
|
| 439 |
+
|
| 440 |
+
$$
|
| 441 |
+
\mathrm {E} \left[ \left\| x ^ {k + 1} - x ^ {\star} \right\| ^ {2} \right] \leq (1 - \mu \eta^ {k}) \mathrm {E} \left[ \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} \right] - \eta^ {k} \mathrm {E} \left[ f \left(x ^ {k}\right) - f ^ {\star} \right] + (\eta^ {k}) ^ {2} \left((\delta_ {n} - 1) D + \frac {\delta \sigma^ {2}}{n}\right).
|
| 442 |
+
$$
|
| 443 |
+
|
| 444 |
+
The rest of the analysis is closely related to the one of Stich (2019b). We would like to point out that similar results to Stich (2019b) were also present in (Lacoste-Julien et al., 2012; Stich et al., 2018; Grimmer, 2019).
|
| 445 |
+
|
| 446 |
+
We first rewrite the previous inequality to the form
|
| 447 |
+
|
| 448 |
+
$$
|
| 449 |
+
r ^ {k + 1} \leq \left(1 - a \eta^ {k}\right) r ^ {k} - \eta^ {k} s ^ {k} + \left(\eta^ {k}\right) ^ {2} c, \tag {10}
|
| 450 |
+
$$
|
| 451 |
+
|
| 452 |
+
where $r^k = \operatorname{E}\left[\left\| x^k - x^\star \right\|^2\right]$ , $s^k = \operatorname{E}\left[f(x^k) - f^\star\right]$ , $a = \mu, c = (\delta_n - 1)D + \frac{\delta\sigma^2}{n}$ .
|
| 453 |
+
|
| 454 |
+
We proceed with lemmas that establish a convergence guarantee for every recursion of type (10).
|
| 455 |
+
|
| 456 |
+
Lemma 6. Let $\{r^k\}_{k\geq 0}$ , $\{s^k\}_{k\geq 0}$ be as in (10) for $a > 0$ and for constant step sizes $\eta^k \equiv \eta \coloneqq \frac{1}{d}$ , $\forall k \geq 0$ . Then it holds for all $T \geq 0$ :
|
| 457 |
+
|
| 458 |
+
$$
|
| 459 |
+
r ^ {T} \leq r ^ {0} \exp \left[ - \frac {a T}{d} \right] + \frac {c}{a d}.
|
| 460 |
+
$$
|
| 461 |
+
|
| 462 |
+
Proof. This follows by relaxing (10) using $\operatorname{E}\left[f(x^k) - f^\star\right] \geq 0$ , and unrolling the recursion
|
| 463 |
+
|
| 464 |
+
$$
|
| 465 |
+
r ^ {T} \leq (1 - a \eta) r ^ {T - 1} + c \eta^ {2} \leq (1 - a \eta) ^ {T} r ^ {0} + c \eta^ {2} \sum_ {k = 0} ^ {T - 1} (1 - a \eta) ^ {k} \leq (1 - a \eta) ^ {T} r ^ {0} + \frac {c \eta}{a}. \tag {11}
|
| 466 |
+
$$
|
| 467 |
+
|
| 468 |
+

|
| 469 |
+
|
| 470 |
+
Lemma 7. Let $\{r^k\}_{k\geq 0}$ , $\{s^k\}_{k\geq 0}$ as in (10) for $a > 0$ and for decreasing step sizes $\eta^k \coloneqq \frac{2}{a(\kappa + k)}$ , $\forall k\geq 0$ , with parameter $\kappa \coloneqq \frac{2d}{a}$ , and weights $w^k \coloneqq (\kappa + k)$ . Then
|
| 471 |
+
|
| 472 |
+
$$
|
| 473 |
+
\frac {1}{W ^ {T}} \sum_ {k = 0} ^ {T} s ^ {k} w ^ {k} + a r ^ {T + 1} \leq \frac {2 a \kappa^ {2} r _ {0}}{T ^ {2}} + \frac {2 c}{a T},
|
| 474 |
+
$$
|
| 475 |
+
|
| 476 |
+
where $W^T \coloneqq \sum_{k=0}^{T} w^k$ .
|
| 477 |
+
|
| 478 |
+
Proof. We start by re-arranging (10) and multiplying both sides with $w^k$
|
| 479 |
+
|
| 480 |
+
$$
|
| 481 |
+
\begin{array}{l} s ^ {k} w ^ {k} \leq \frac {w ^ {k} \left(1 - a \eta^ {k}\right) r ^ {k}}{\eta^ {k}} - \frac {w ^ {k} r ^ {k + 1}}{\eta^ {k}} + c \eta^ {k} w ^ {k} \\ = a (\kappa + k) (\kappa + k - 2) r ^ {k} - a (\kappa + k) ^ {2} r ^ {k + 1} + \frac {c}{a} \\ \leq a (\kappa + k - 1) ^ {2} r ^ {k} - a (\kappa + k) ^ {2} r ^ {k + 1} + \frac {c}{a}, \\ \end{array}
|
| 482 |
+
$$
|
| 483 |
+
|
| 484 |
+
where the equality follows from the definition of $\eta^k$ and $w^k$ and the inequality from $(\kappa + k)(\kappa + k - 2) = (\kappa + k - 1)^2 - 1 \leq (\kappa + k - 1)^2$ . Again we have a telescoping sum:
|
| 485 |
+
|
| 486 |
+
$$
|
| 487 |
+
\frac {1}{W ^ {T}} \sum_ {k = 0} ^ {T} s ^ {k} w ^ {k} + \frac {a (\kappa + T) ^ {2} r ^ {T + 1}}{W ^ {T}} \leq \frac {a \kappa^ {2} r ^ {0}}{W ^ {T}} + \frac {c (T + 1)}{a W ^ {T}},
|
| 488 |
+
$$
|
| 489 |
+
|
| 490 |
+
with
|
| 491 |
+
|
| 492 |
+
- $W^{T} = \sum_{k=0}^{T} w^{k} = \sum_{k=0}^{T} (\kappa + k) = \frac{(2\kappa + T)(T + 1)}{2} \geq \frac{T(T + 1)}{2} \geq \frac{T^{2}}{2}$ ,
|
| 493 |
+
and $W^{T} = \frac{(2\kappa + T)(T + 1)}{2}\leq \frac{2(\kappa + T)(1 + T)}{2}\leq (\kappa +T)^{2}$ for $\kappa = \frac{2d}{a}\geq 1$
|
| 494 |
+
|
| 495 |
+
By applying these two estimates we conclude the proof.
|
| 496 |
+
|
| 497 |
+

|
| 498 |
+
|
| 499 |
+
The convergence can be obtained as the combination of these two lemmas.
|
| 500 |
+
|
| 501 |
+
Lemma 8. Let $\{r^k\}_{k\geq 0}$ , $\{s^k\}_{k\geq 0}$ as in (10), $a > 0$ . Then there exist step sizes $\eta^k \leq \frac{1}{d}$ and weights $w^k \geq 0$ , $W^T \coloneqq \sum_{k=0}^{T} w^k$ , such that
|
| 502 |
+
|
| 503 |
+
$$
|
| 504 |
+
\frac {1}{W ^ {T}} \sum_ {k = 0} ^ {T} s ^ {k} w ^ {k} + a r ^ {T + 1} \leq 3 2 d r _ {0} \exp \left[ - \frac {a T}{2 d} \right] + \frac {3 6 c}{a T}.
|
| 505 |
+
$$
|
| 506 |
+
|
| 507 |
+
Proof of Lemma 8. For integer $T \geq 0$ , we choose stepsizes and weights as follows
|
| 508 |
+
|
| 509 |
+
if $T\leq \frac{d}{a}$ $\eta^k = \frac{1}{d},$ $w^{k} = (1 - a\eta^{k})^{-(k + 1)} = \left(1 - \frac{a}{d}\right)^{-(k + 1)},$
|
| 510 |
+
|
| 511 |
+
if $T > \frac{d}{a}$ and $k < t_0$ , $\eta^k = \frac{1}{d}$ , $w^k = 0$ ,
|
| 512 |
+
|
| 513 |
+
if $T > \frac{d}{a}$ and $k\geq t_0$ , $\eta^k = \frac{2}{a(\kappa + k - t_0)}$ , $w^{k} = (\kappa +k - t_{0})^{2}$
|
| 514 |
+
|
| 515 |
+
for $\kappa = \frac{2d}{a}$ and $t_0 = \left\lceil \frac{T}{2} \right\rceil$ . We will now show that these choices imply the claimed result.
|
| 516 |
+
|
| 517 |
+
We start with the case $T \leq \frac{d}{a}$ . For this case, the choice $\eta = \frac{1}{d}$ gives
|
| 518 |
+
|
| 519 |
+
$$
|
| 520 |
+
\begin{array}{l} \frac {1}{W ^ {T}} \sum_ {k = 0} ^ {T} s ^ {k} w ^ {k} + a r ^ {T + 1} \leq (1 - a \eta) ^ {(T + 1)} \frac {r _ {0}}{\eta} + c \eta \\ \leq \frac {r _ {0}}{\eta} \exp \left[ - a \eta (T + 1) \right] + c \eta \\ \leq d r _ {0} \exp \left[ - \frac {a T}{d} \right] + \frac {c}{a T}. \\ \end{array}
|
| 521 |
+
$$
|
| 522 |
+
|
| 523 |
+
If $T > \frac{d}{a}$ , then we obtain from Lemma 6 that
|
| 524 |
+
|
| 525 |
+
$$
|
| 526 |
+
r ^ {t _ {0}} \leq r ^ {0} \exp \left[ - \frac {a T}{2 d} \right] + \frac {c}{a d}.
|
| 527 |
+
$$
|
| 528 |
+
|
| 529 |
+
From Lemma 7 we have for the second half of the iterates:
|
| 530 |
+
|
| 531 |
+
$$
|
| 532 |
+
\frac {1}{W ^ {T}} \sum_ {k = 0} ^ {T} s ^ {k} w ^ {k} + a r ^ {T + 1} = \frac {1}{W ^ {T}} \sum_ {k = t _ {0}} ^ {T} s ^ {k} w ^ {k} + a r ^ {T + 1} \leq \frac {8 a \kappa^ {2} r ^ {t _ {0}}}{T ^ {2}} + \frac {4 c}{a T}.
|
| 533 |
+
$$
|
| 534 |
+
|
| 535 |
+
Now we observe that the restart condition $r^{t_0}$ satisfies:
|
| 536 |
+
|
| 537 |
+
$$
|
| 538 |
+
\frac {a \kappa^ {2} r ^ {t _ {0}}}{T ^ {2}} \leq \frac {a \kappa^ {2} r ^ {0} \exp \left(- \frac {a T}{2 d}\right)}{T ^ {2}} + \frac {\kappa^ {2} c}{d T ^ {2}} \leq 4 a r ^ {0} \exp \left[ - \frac {a T}{2 d} \right] + \frac {4 c}{a T},
|
| 539 |
+
$$
|
| 540 |
+
|
| 541 |
+
because $T > \frac{d}{a}$ . This concludes the proof.
|
| 542 |
+
|
| 543 |
+

|
| 544 |
+
|
| 545 |
+
Having these general convergence lemmas for the recursion of the form (10), the proof of the theorem follows directly from Lemmas 6 and 8 with $a = \mu$ , $c = (\delta_n - 1)D + \frac{\delta\sigma^2}{n}$ , $d = 2\delta_n L$ . It is easy to check that condition $\eta^k \leq \frac{1}{d} = \frac{1}{2\delta_n L}$ is satisfied.
|
| 546 |
+
|
| 547 |
+
# C.3 PROOF OF THEOREM 3
|
| 548 |
+
|
| 549 |
+
We have to show that our new compression is unbiased and has bounded variance. We start with the first property with $\lambda = 1$ .
|
| 550 |
+
|
| 551 |
+
$$
|
| 552 |
+
\begin{array}{l} \operatorname {E} \left[ \mathcal {C} _ {1} (x) + \mathcal {C} _ {2} (x - \mathcal {C} _ {1} (x)) \right] = \operatorname {E} _ {\mathcal {C} _ {1}} \left[ \operatorname {E} _ {\mathcal {C} _ {2}} \left[ \mathcal {C} _ {1} (x) + \mathcal {C} _ {2} (x - \mathcal {C} _ {1} (x)) | \mathcal {C} _ {1} (x) \right] \right] \\ = \operatorname {E} _ {\mathcal {C} _ {1}} \left[ \mathcal {C} _ {1} (x) + x - \mathcal {C} _ {1} (x) \right] = x, \\ \end{array}
|
| 553 |
+
$$
|
| 554 |
+
|
| 555 |
+
where the first equality follows from tower property and the second from unbiasedness of $\mathcal{C}_2$ . For the second property, we also use tower property
|
| 556 |
+
|
| 557 |
+
$$
|
| 558 |
+
\begin{array}{l} \left. \right. \operatorname {E} \left[ \| \mathcal {C} _ {1} (x) - x + \mathcal {C} _ {2} (x - \mathcal {C} _ {1} (x)) \| ^ {2} \right] = \operatorname {E} _ {\mathcal {C} _ {1}} \left[ \operatorname {E} _ {\mathcal {C} _ {2}} \left[ \| \mathcal {C} _ {1} (x) - x + \mathcal {C} _ {2} (x - \mathcal {C} _ {1} (x)) \| ^ {2} | \mathcal {C} _ {1} (x) \right]\right] \\ \leq (\delta_ {2} - 1) \mathrm {E} _ {\mathcal {C} _ {1}} \left[ \| \mathcal {C} _ {1} (x) - x \| ^ {2} \right] \\ \leq \left(\delta_ {2} - 1\right) \left(1 - \frac {1}{\delta_ {1}}\right) \| x \| ^ {2}, \\ \end{array}
|
| 559 |
+
$$
|
| 560 |
+
|
| 561 |
+
where the first and second inequalities follow directly from (2) and (3).
|
| 562 |
+
|
| 563 |
+
# C.4 PROOF OF LEMMA 4 (HORVÁTH & RICHTÁRIK, 2019)
|
| 564 |
+
|
| 565 |
+
For the first part of the claim, it was shown that $\mathbf{P} - pp^{\top}$ is positive semidefinite (Richtárik & Takáč, 2016), thus we can bound $\mathbf{P} - pp^{\top} \preceq n\mathbf{Diag}(\mathbf{P} - pp^{\top}) = \mathbf{Diag}(p \circ v)$ , where $v_{i} = n(1 - p_{i})$ which implies that (8) holds for this choice of $v$ .
|
| 566 |
+
|
| 567 |
+
For the second part of the claim, let $1_{i\in \mathbb{S}} = 1$ if $i\in \mathbb{S}$ and $1_{i\in \mathbb{S}} = 0$ otherwise. Likewise, let $1_{i,j\in \mathbb{S}} = 1$ if $i,j\in \mathbb{S}$ and $1_{i,j\in \mathbb{S}} = 0$ otherwise. Note that $\operatorname {E}[1_{i\in \mathbb{S}}] = p_i$ and $\operatorname {E}[1_{i,j\in \mathbb{S}}] = p_{ij}$ . Next, let us compute the mean of $X\coloneqq \sum_{i\in \mathbb{S}}\frac{\zeta_i}{np_i}$ :
|
| 568 |
+
|
| 569 |
+
$$
|
| 570 |
+
\operatorname {E} [ X ] = \operatorname {E} \left[ \sum_ {i \in \mathbb {S}} \frac {\zeta_ {i}}{n p _ {i}} \right] = \operatorname {E} \left[ \sum_ {i = 1} ^ {n} \frac {\zeta_ {i}}{n p _ {i}} 1 _ {i \in \mathbb {S}} \right] = \sum_ {i = 1} ^ {n} \frac {\zeta_ {i}}{n p _ {i}} \operatorname {E} \left[ 1 _ {i \in \mathbb {S}} \right] = \frac {1}{n} \sum_ {i = 1} ^ {n} \zeta_ {i} = \bar {\zeta}. \tag {12}
|
| 571 |
+
$$
|
| 572 |
+
|
| 573 |
+
Let $\mathbf{A} = [a_1, \ldots, a_n] \in \mathbb{R}^{d \times n}$ , where $a_i = \frac{\zeta_i}{p_i}$ , and let $e$ be the vector of all ones in $\mathbb{R}^n$ . We now write the variance of $X$ in a form which will be convenient to establish a bound:
|
| 574 |
+
|
| 575 |
+
$$
|
| 576 |
+
\begin{array}{l} \operatorname {E} \left[ \| X - \operatorname {E} [ X ] \| ^ {2} \right] = \operatorname {E} \left[ \| X \| ^ {2} \right] - \| \operatorname {E} [ X ] \| ^ {2} \\ = \mathrm {E} \left[ \left\| \sum_ {i \in \mathbb {S}} \frac {\zeta_ {i}}{n p _ {i}} \right\| ^ {2} \right] - \| \bar {\zeta} \| ^ {2} \\ = \mathrm {E} \left[ \sum_ {i, j} \frac {\zeta_ {i} ^ {\top}}{n p _ {i}} \frac {\zeta_ {j}}{n p _ {j}} 1 _ {i, j \in \mathbb {S}} \right] - \| \bar {\zeta} \| ^ {2} \\ = \sum_ {i, j} p _ {i j} \frac {\zeta_ {i} ^ {\top}}{n p _ {i}} \frac {\zeta_ {j}}{n p _ {j}} - \sum_ {i, j} \frac {\zeta_ {i} ^ {\top}}{n} \frac {\zeta_ {j}}{n} \\ = \frac {1}{n ^ {2}} \sum_ {i, j} \left(p _ {i j} - p _ {i} p _ {j}\right) a _ {i} ^ {\top} a _ {j} \\ = \frac {1}{n ^ {2}} e ^ {\top} \left(\left(\mathbf {P} - p p ^ {\top}\right) \circ \mathbf {A} ^ {\top} \mathbf {A}\right) e. \tag {13} \\ \end{array}
|
| 577 |
+
$$
|
| 578 |
+
|
| 579 |
+
Since by assumption we have $\mathbf{P} - p p^{\top} \preceq \mathbf{Diag}(p \circ v)$ , we can further bound
|
| 580 |
+
|
| 581 |
+
$$
|
| 582 |
+
e ^ {\top} \left(\left(\mathbf {P} - p p ^ {\top}\right) \circ \mathbf {A} ^ {\top} \mathbf {A}\right) e \leq e ^ {\top} \left(\mathbf {D i a g} (p \circ v) \circ \mathbf {A} ^ {\top} \mathbf {A}\right) e = \sum_ {i = 1} ^ {n} p _ {i} v _ {i} \| a _ {i} \| ^ {2}.
|
| 583 |
+
$$
|
| 584 |
+
|
| 585 |
+
To obtain (9), it remains to combine this with (13).
|
| 586 |
+
|
| 587 |
+
# C.5 PROOF OF THEOREM 5
|
| 588 |
+
|
| 589 |
+
Similarly to the proof of Theorem 2, we use the update of Algorithm 1 to bound the following quantity
|
| 590 |
+
|
| 591 |
+
$$
|
| 592 |
+
\operatorname {E} \left[ \left\| x ^ {k + 1} - x ^ {\star} \right\| ^ {2} | x ^ {k} \right] \quad = \quad \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \operatorname {E} \left[ \left\langle \sum_ {i \in S ^ {k}} \frac {1}{n p _ {i}} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right), x ^ {k} - x ^ {\star} \right\rangle | x ^ {k} \right] +
|
| 593 |
+
$$
|
| 594 |
+
|
| 595 |
+
$$
|
| 596 |
+
\mathrm {E} \left[ \left\| \sum_ {i \in S ^ {k}} \frac {\eta^ {k}}{n p _ {i}} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right) \right\| ^ {2} | x ^ {k} \right]
|
| 597 |
+
$$
|
| 598 |
+
|
| 599 |
+
$$
|
| 600 |
+
\stackrel {(2) + (5)} {\leq} \quad \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left\langle \nabla f (x ^ {k}), x ^ {k} - x ^ {\star} \right\rangle +
|
| 601 |
+
$$
|
| 602 |
+
|
| 603 |
+
$$
|
| 604 |
+
\left. \right. (\eta^ {k}) ^ {2} \left(\mathrm {E} \left[\left\| \sum_ {i \in S ^ {k}} \frac {1}{n p _ {i}} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right) - \frac {1}{n} \sum_ {i = 1} ^ {n} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right)\right\| ^ {2} | x ^ {k} \right] + \mathrm {E} \left[\left\| \frac {1}{n} \sum_ {i = 1} ^ {n} \mathcal {C} ^ {k} \left(g _ {i} ^ {k}\right)\right\| ^ {2} | x ^ {k} \right]\right)
|
| 605 |
+
$$
|
| 606 |
+
|
| 607 |
+
$$
|
| 608 |
+
\begin{array}{l} \stackrel {(2) + (5) + (9)} {\leq} \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left\langle \nabla f (x ^ {k}), x ^ {k} - x ^ {\star} \right\rangle + \\ \frac {(\eta^ {k}) ^ {2}}{n ^ {2}} \mathrm {E} \left[ \sum_ {i = 1} ^ {n} \left(\frac {\delta v _ {i}}{p _ {i}} + \delta - 1\right) \| g _ {i} ^ {k} \| ^ {2} + \left\| \sum_ {i = 1} ^ {n} g _ {i} ^ {k} \right\| ^ {2} | x ^ {k} \right] \\ \end{array}
|
| 609 |
+
$$
|
| 610 |
+
|
| 611 |
+
$$
|
| 612 |
+
\stackrel {(4) + (6) + (7)} {\leq} \quad \left(1 - \mu \eta^ {k}\right) \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} - 2 \eta^ {k} \left(1 - \eta^ {k} \delta_ {\mathbb {S}} L\right) \left(f \left(x ^ {k}\right) - f ^ {\star}\right) +
|
| 613 |
+
$$
|
| 614 |
+
|
| 615 |
+
$$
|
| 616 |
+
\left(\eta^ {k}\right) ^ {2} \left(\left(\delta_ {\mathbb {S}} - 1\right) D + \left(1 + a _ {\mathbb {S}}\right) \frac {\delta \sigma^ {2}}{n}\right).
|
| 617 |
+
$$
|
| 618 |
+
|
| 619 |
+
Taking full expectation and $\eta^k\leq \frac{1}{2\delta_{\mathbb{S}}L}$ , we obtain
|
| 620 |
+
|
| 621 |
+
$$
|
| 622 |
+
\mathrm {E} \left[ \left\| x ^ {k + 1} - x ^ {\star} \right\| ^ {2} \right] \leq (1 - \mu \eta^ {k}) \mathrm {E} \left[ \left\| x ^ {k} - x ^ {\star} \right\| ^ {2} \right] - \eta^ {k} \mathrm {E} \left[ f (x ^ {k}) - f ^ {\star} \right] + (\eta^ {k}) ^ {2} \left((\delta_ {\mathbb {S}} - 1) D + (1 + a _ {\mathbb {S}}) \frac {\delta \sigma^ {2}}{n}\right).
|
| 623 |
+
$$
|
| 624 |
+
|
| 625 |
+
The rest of the analysis is identical to the proof of Theorem 2 with only difference $c = (\delta_{\mathbb{S}} - 1)D + (1 + a_{\mathbb{S}})\frac{\delta\sigma^2}{n}$ .
|
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9f867074fea77bfc0380d29e14b6881d2075c699d55e911a64e0c1074b7d941f
|
| 3 |
+
size 613012
|
abetteralternativetoerrorfeedbackforcommunicationefficientdistributedlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:85fd9fa5d81fb0ae1729f7147b7143da89edd8c7cfba650d85c287080eb1c50c
|
| 3 |
+
size 759371
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3958fa42567478b69b36c9418c0eddfc139068ddc52b14a17a3b4aab8b1af866
|
| 3 |
+
size 89280
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4099d8b67003fe1271f72fb59e73330f9a5dffd79c6cf7bdb5b13b23347fa714
|
| 3 |
+
size 108019
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/ad74685f-245b-4cd8-b019-5a662efbb258_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:22670535ccf8db0b116fc3b58a89ce7b3ac71528c2c5536be46e2466a807cc91
|
| 3 |
+
size 876830
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/full.md
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A BLOCK MINIFLOAT REPRESENTATION FOR TRAINING DEEP NEURAL NETWORKS
|
| 2 |
+
|
| 3 |
+
Sean Fox, Seyedramin Rasoulinezhad, Julian Faraone, David Boland & Philip Leong
|
| 4 |
+
|
| 5 |
+
School of Electrical and Information Engineering
|
| 6 |
+
|
| 7 |
+
The University of Sydney
|
| 8 |
+
|
| 9 |
+
Sydney, NSW 2006, AUS
|
| 10 |
+
|
| 11 |
+
{first}. {last}@sydney.edu.au
|
| 12 |
+
|
| 13 |
+
# ABSTRACT
|
| 14 |
+
|
| 15 |
+
Training Deep Neural Networks (DNN) with high efficiency can be difficult to achieve with native floating-point representations and commercially available hardware. Specialized arithmetic with custom acceleration offers perhaps the most promising alternative. Ongoing research is trending towards narrow floating-point representations, called minifloats, that pack more operations for a given silicon area and consume less power. In this paper, we introduce Block Minifloat (BM), a new spectrum of minifloat formats capable of training DNNs end-to-end with only 4-8 bit weight, activation and gradient tensors. While standard floating-point representations have two degrees of freedom, via the exponent and mantissa, BM exposes the exponent bias as an additional field for optimization. Crucially, this enables training with fewer exponent bits, yielding dense integer-like hardware for fused multiply-add (FMA) operations. For ResNet trained on ImageNet, 6-bit BM achieves almost no degradation in floating-point accuracy with FMA units that are $4.1 \times (23.9 \times)$ smaller and consume $2.3 \times (16.1 \times)$ less energy than FP8 (FP32). Furthermore, our 8-bit BM format matches floating-point accuracy while delivering a higher computational density and faster expected training times.
|
| 16 |
+
|
| 17 |
+
# 1 INTRODUCTION
|
| 18 |
+
|
| 19 |
+
The energy consumption and execution time associated with training Deep Neural Networks (DNNs) is directly related to the precision of the underlying numerical representation. Most commercial accelerators, such as NVIDIA Graphics Processing Units (GPUs), employ conventional floating-point representations due to their standard of use and wide dynamic range. However, double-precision (FP64) and single-precision (FP32) formats have relatively high memory bandwidth requirements and incur significant hardware overhead for general matrix multiplication (GEMM). To reduce these costs and deliver training at increased speed and scale, representations have moved to 16-bit formats, with NVIDIA and Google providing FP16 (IEEE-754, 2019) and Bfloat16 (Kalamkar et al., 2019) respectively. With computational requirements for DNNs likely to increase, further performance gains are necessary in both datacenter and edge devices, where there are stricter physical constraints.
|
| 20 |
+
|
| 21 |
+
New number representations must be easy to use and lead to high accuracy results. Recent 8-bit floating-point representations have shown particular promise, achieving equivalent FP32 accuracy over different tasks and datasets (Wang et al., 2018; Sun et al., 2019). We refer to such representations as minifloats in this paper. Minifloats are ideal candidates for optimization. By varying the number of exponent and mantissa bits, many formats can be explored for different trade-offs of dynamic range and precision. These include logarithmic and fixed point representations which provide substantial gains in speed and hardware density compared to their floating-point counterparts. For instance, 32-bit integer adders are approximately $10 \times$ smaller and $4 \times$ more energy efficient than comparative FP16 units (Dally, 2015). That said, fixed point representations still lack the dynamic range necessary to represent small gradients for backpropagation, and must be combined with other techniques for training convergence.
|
| 22 |
+
|
| 23 |
+
Block floating point (BFP) in (Yang et al., 2019; Drumond et al., 2018) share exponents across blocks of 8-bit integer numbers, and provide a type of coarse-grained dynamic range for training. This
|
| 24 |
+
|
| 25 |
+
approach will typically incur some accuracy loss on more challenging datasets, however all dot-products within the block can be computed with dense fixed point logic. In comparison, HFP8 (Sun et al., 2019) minifloats require larger floating-point units (expensive FP16 adders in particular) but have at least 5 exponent bits dedicated to each gradient and suffer zero degradation in training accuracy. It would seem that an ideal representation should bridge the gap between each of these approaches. Our work achieves this for 8-bit and sub 8-bit precision schemes, overcoming two key challenges in the process. These are listed below and discussed with related works.
|
| 26 |
+
|
| 27 |
+
# 1.1 CHALLENGES AND RELATED WORK
|
| 28 |
+
|
| 29 |
+
Minimising data loss with fewer bits: While several works have demonstrated training with fewer than 8 bits of precision, they typically lead to loss of accuracy on more complex problems and have performance bottlenecks because parts of the algorithm are left in high precision (Hubara et al., 2017; Zhou et al., 2016; Miyashita et al., 2016). Therefore, training end-to-end with reduced precision representations that are persistent remains a key challenge. In this regard, 8 bit tensors with 16-bit updates can be trained effectively (Banner et al., 2018). Data loss arises when formats do not have enough range to capture variations in tensor distributions during training. BFloat (Kalamkar et al., 2019) adds two extra exponent bits for a custom 16-bit representation, and the Apex library is used in (Micikevicius et al., 2017; Wang et al., 2018; Sun et al., 2019) for scaling the loss function into a numerically representable range. Block floating point and other variants apply similar functionality for fixed point numbers, but at a finer granularity. WAGE (Wu et al., 2018) uses layer-wise scaling factors, SWALP (Yang et al., 2019) shares exponents across feature maps and convolution channels, and HBFP (Drumond et al., 2018) does the same for dot-products, though their implementation requires caching of intermediate activations in FP32 and wide weight storage for better accuracy. S2FP8 (Cambier et al., 2020) replaces loss-scaling in FP8 (Wang et al., 2018) with squeeze and shift factors that center 8-bit minifloats over the mean exponent of the value distribution. Shift factors operate similarly to BFP shared exponents, whereas squeeze factors can divert precision away from high value regions leading to errors in dot-product calculations. We provide some empirical evidence of this effect in Section 4.5. Finally, HFP8 (Sun et al., 2019) defines two minifloat formats that are optimized for range and precision requirements of forward and backward paths separately. 
In this work, we seek minifloat formats that are also optimized for arithmetic density.
|
| 30 |
+
|
| 31 |
+
Increasing the performance density of floating-point: Most DNN training frameworks are developed with GEMM accumulation in FP32. The authors in (Wang et al., 2018) reduced the accumulation width to FP16 with chunk-based computations and stochastic rounding. However, training minibloats with even denser dot-products has not been demonstrated. For DNN inference, ELMA (Johnson, 2018) and posit number systems (Gustafson & Yonemoto, 2017) describe arithmetic that accumulate minibfloat-like numbers as integers. Such work is applicable when the number of exponent bits is small, however training under such regimes can lead to data loss due to limited dynamic range.
|
| 32 |
+
|
| 33 |
+
# 1.2 CONTRIBUTIONS
|
| 34 |
+
|
| 35 |
+
In this paper, we present the Block Minifloat (BM) representation which addresses both of the aforementioned challenges. BM is a modification of block floating point that replaces the fixed point values with minifloats, whilst maintaining shared exponents across blocks of numbers. BM formats generalise a far wider spectrum of reduced precision representations and produce better outcomes than previous 8-bit regimes. Specific contributions of our work include:
|
| 36 |
+
|
| 37 |
+
- Block Minifloat (BM), a more efficient alternative to INT8 and FP8 for end-to-end DNN training. Shared exponent biases provide dynamic range and accuracy, while small exponent encodings provide fine-grained dynamic range and reduce the hardware cost of GEMM accumulation.
|
| 38 |
+
- A new 8-bit floating-point format that uses no more than 4 exponent bits, achieving equivalent accuracy to floating-point with denser hardware via efficient Kulisch accumulation.
|
| 39 |
+
- An exploration of the BM design space showing high accuracy DNN training with sub 8-bit representations for all weights, activations and gradients. This includes two techniques for minimising data loss of a practical implementation, namely gradual underflow and cost-aware block designs.
|
| 40 |
+
|
| 41 |
+

|
| 42 |
+
(a) Minifloat
|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
(b) BM (Shared exp. bias)
|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
Figure 1: Minifloat and Block Minifloat (BM) tensor representations
|
| 49 |
+
Figure 2: Exponent bias shifts the minifloat distribution to align with the maximum exponent of the value distribution
|
| 50 |
+
|
| 51 |
+
# 2 BLOCK MINIFLOAT REPRESENTATION
|
| 52 |
+
|
| 53 |
+
# 2.1 MINIFLOAT NUMBER FORMAT
|
| 54 |
+
|
| 55 |
+
Equation (1) computes the real value of a minifloat number, where $(e,m)$ denote the number of exponent and mantissa bits in the representation.
|
| 56 |
+
|
| 57 |
+
$$
|
| 58 |
+
X \langle e, m \rangle = \begin{cases} (-1)^{s} \times 2^{1 - \beta} \times \left(0 + F \times 2^{-m}\right), & E = 0 \quad \text{(denormal)} \\ (-1)^{s} \times 2^{E - \beta} \times \left(1 + F \times 2^{-m}\right), & \text{otherwise} \quad \text{(normal)} \end{cases} \tag{1}
|
| 59 |
+
$$
|
| 60 |
+
|
| 61 |
+
The decimal expansions of the exponent and mantissa are both unsigned integers, given by $E$ and $F$ respectively, $s$ refers to the sign bit and $\beta = 2^{e - 1} - 1$ is the exponent bias for the binary-offset encoding scheme. This is consistent with IEEE-754 floating-point standards, except that our minifloats are considerably smaller (4-8 bits only), can generalise to multiple $(e,m)$ configurations, and do not handle nan/infinity bit patterns. Instead, arithmetic is allowed to saturate at the limits of the representable range $[X_{min}^{+},X_{max}^{+}]$ . For example, a minifloat representation with $X\langle 4,3\rangle$ has exponents and mantissas that range between $[0,15]$ and $[0,7]$ respectively. Therefore, the largest normal and smallest denormal positive numbers are $X_{max}^{+} = 480$ and $X_{min}^{+} = 2^{-9}$ . This corresponds to a dynamic range of 108 dB shown in Table 7 in Appendix A.1.
|
| 62 |
+
|
| 63 |
+
# 2.2 SHARED EXPONENT BIAS
|
| 64 |
+
|
| 65 |
+
The main difference between minifloat and block minifloat (BM) representations is highlighted in Figure 1. Minifloats have one exponent per element, but that exponent must be wide enough to tolerate changes in DNN tensor distributions during training (i.e. 5 bits for gradients in FP8 (Wang et al., 2018)). In contrast, BM shares exponent biases across blocks of $N$ minifloat numbers. This provides equivalent dynamic range with respect to the block, but with fewer exponent bits than the original minifloat format. Block floating point (BFP) operates similarly, but all numbers within the block are integers (Drumond et al., 2018). BM can generalise for this case, i.e. when $e = 0$ .
|
| 66 |
+
|
| 67 |
+
The real value of the $i^{th}$ element from BM tensor $a$ is given in Equation (2), where $X$ is an unbiased minifloat tensor, represented by $(e,m)$ exponent and mantissa bits, and $\beta_{a}$ is the shared exponent bias.
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
a _ {i} = X _ {i} \left\langle e, m \right\rangle \times 2 ^ {- \beta_ {a}} \tag {2}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
In this example, $a_{i}$ can only be represented accurately when the shared exponent bias $\beta_{a}$ (calculated for the entire tensor) and the distribution of $X$ jointly captures the value distribution of $a$ . For example, large and small values in $a$ could saturate or be lost altogether if $\beta_{a}$ is too large or too small. However, some leeway exists when exponents are shared across dot products. This is because dot products are reduce operations, meaning their sum is dominated by the largest values in the inputs. For this reason, we calculate $\beta_{a}$ to specifically guard against overflow, and unlike (Cambier et al., 2020) we don't apply any scaling which could divert precision away from larger value regions. Our method of updating $\beta$ during training is illustrated in Figure 2 and formalized in Equation 3 below.
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\beta_ {a} = \max \left(\left\lfloor \log_ {2} | a | \right\rfloor\right) - \left(2 ^ {e} - 1\right) \tag {3}
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
The first term denotes the maximum exponent for the tensor $a$ , which changes and must be updated during training, while the second term is fixed and refers to the maximum exponent of $X$ .
|
| 80 |
+
|
| 81 |
+
In terms of hardware, shared biases ensure that all dot products can be computed with denser minifloat arithmetic. This is shown in Equation 4 for BM tensors $a$ and $b$ , each with $N$ elements.
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
a \cdot b = \sum_ {i = 1} ^ {N} \left(\left(X _ {i} ^ {a} \times 2 ^ {- \beta_ {a}}\right) \times \left(X _ {i} ^ {b} \times 2 ^ {- \beta_ {b}}\right)\right) = 2 ^ {- \left(\beta_ {a} + \beta_ {b}\right)} \times \left(X ^ {a} \cdot X ^ {b}\right) \tag {4}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
The dot product, $X^a \cdot X^b$ , uses minifloat formats with smaller exponents, while the cost of calculating, storing and aligning the exponent biases during training is amortized over the length of the dot-product. Next we show how minifloat formats with fewer exponent bits lead to faster and more compact hardware.
|
| 88 |
+
|
| 89 |
+
# 2.3 KULISCH ACCUMULATION
|
| 90 |
+
|
| 91 |
+
A Kulisch accumulator (Kulisch & Miranker, 2014) is a fixed point accumulator that is wide enough to compute an error free sum of scalar floating-point products, over the entire range of possible values. Kulisch accumulators operate by shifting the mantissa of the floating-point product into an internal register according to the exponent of the product. The sum proceeds as integer addition which is $4 - 10 \times$ more efficient in terms of area and power compared to FP16 (Dally, 2015). The number of bits required for the internal register (i.e. the addend) and shifter, scale the size and complexity of the accumulator and are provided as formulas in Equation (5) for BM operands $a = (e_a, m_a)$ and $b = (e_b, m_b)$ .
|
| 92 |
+
|
| 93 |
+
Table 1: Kulisch accumulator examples
|
| 94 |
+
|
| 95 |
+
<table><tr><td rowspan="2">Format (ea, ma)/(eb, mb)</td><td colspan="2">Kulisch Acc.</td></tr><tr><td>kadd</td><td>kshift</td></tr><tr><td>(8, 23)/(8, 23)</td><td>561</td><td>512</td></tr><tr><td>(5, 2)/(6, 1)</td><td>102</td><td>96</td></tr><tr><td>(4, 3)/(5, 2)</td><td>56</td><td>48</td></tr><tr><td>(3, 4)/(4, 3)</td><td>34</td><td>24</td></tr><tr><td>(2, 3)/(3, 2)</td><td>20</td><td>12</td></tr><tr><td>INT8</td><td>32</td><td>-</td></tr></table>
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\mathit{kadd} = 1 + \left(2^{e_{a}} + m_{a} + 1\right) + \left(2^{e_{b}} + m_{b} + 1\right) \tag{5}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathit{kshift} = 2^{e_{a}} + 2^{e_{b}} \tag{6}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
In the above equations, $kadd$ calculates the number of bits required for the largest product of two numbers, plus one extra bit for the addition, and $kshift$ determines the maximum number of bits the mantissa product must be shifted to align with the addend. Crucially, by considering the size of $kadd$ and $kshift$ , BM formats can be designed to trade-off fine-grained dynamic range (i.e. exponent bits) for more precision and smaller hardware. In fact, formats with exponents up to 4 bits may yield $kadd$ of approximately the same size as INT8/INT32 arithmetic units, while $kadd$ becomes prohibitively wider and more expensive for larger exponents. This is clearly shown via example in Table 1 above, but more importantly, it is supported by hardware synthesis results given in Section 5 and Appendix A.4. For example, an 8-bit minifloat format having 4 exponent bits achieves a $1.6\times$ area reduction compared to HFP8 (Sun et al., 2019) with 5 exponent bits. Furthermore, through an extensive set of experiments we discover that such representations can also achieve high training accuracy which forms a key contribution of our work.
|
| 106 |
+
|
| 107 |
+
# 3 TRAINING WITH BLOCK MINIFLOAT
|
| 108 |
+
|
| 109 |
+
# 3.1 MINIMIZING DATA LOSS
|
| 110 |
+
|
| 111 |
+
BM arithmetic will incur data loss when the value distribution is too wide or requires more precision than can be captured by the underlying minifloat representation within a block. Below, we describe steps taken to mitigate this problem without substantially increasing implementation overheads.
|
| 112 |
+
|
| 113 |
+
Gradual underflow: Our minifloats support denormal numbers as defined in Equation (1). Denormal numbers have precision close to zero, and ensure that consecutively smaller quantized numbers approach zero gradually. The alternative is flush-to-zero which discards the mantissa bits when $E = 0$ . This equates to approximately $12.5\%$ of the exponent encoding when $e = 3$ ; this is highly inefficient. Overhead for denormal numbers in hardware is minimal, and only requires detection of $E = 0$ and a single bit flip in the multiplier. Our experiments show that gradual underflow is crucial for BM formats with less than four exponent bits.
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
(a) Fwd activation
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
(b) Bwd activation grad.
|
| 120 |
+
Figure 3: End-to-end Training with Block Minifloat (BM). All off-chip memory transfers are low precision BM tensors. BM alignments, weight updates, quantization, batchnormalization and ReLU are executed in on-chip scalar FP32 units. The register file (RF) stores a block of $\nabla W$ .
|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
(c) Bwd weight grad. and update
|
| 124 |
+
|
| 125 |
+
Block Size: Matrix multiplication with BM is computed by dividing tensors into $N \times N$ blocks that bound the number of exponent biases and reduce data loss, since each block shares one exponent bias. Square blocks are chosen so that biases are contiguous in memory regardless of whether the block is operating in the forward path or after transposition in the backward path. As such, BM can be stored with a persistent data structure, that doesn't require recasting or extra memory transfers during training. This makes BM easy to use at the software level, but does mean that biases are shared across $N$ independent dot products. In terms of hardware cost, Equation 7 formalizes the relationship between the size of $N$ and three overheads, where $\alpha$ refers to relative area costs for each overhead.
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\mathit{cost} = \overbrace{\alpha_{1} \left(1 + \frac{\log_{2} N}{\mathit{kadd}}\right)}^{\text{Kulisch}} + \overbrace{\alpha_{2} \frac{1}{N}}^{\text{FP}} + \overbrace{\alpha_{3} \frac{8 + N^{2}}{N^{2}}}^{\text{Memory}} \tag{7}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
For the first term, the width of the Kulisch accumulator must increase by $\log_2N$ bits to prevent overflow in the BM dot-products. In the second term, floating-point hardware (including quantization and conversion modules) are required to accumulate, align and convert BM partial results, but the cost is amortized over $N$ fixed point operations. Finally, in the last term, additional memory is required to compute and store one 8-bit bias every $N\times N$ values. For large block sizes, the extra silicon area from Equation 7 is negligible compared to the GEMM but data loss from sharing biases can still be significant. In Section 4.5, we used Equation 7 and determined that a block size of $N = 48$ offers a good balance for both objectives and is used for the rest of this paper.
|
| 132 |
+
|
| 133 |
+
Hybrid representation: Different minifloat representations for forward and backward paths have been shown to produce better accuracy for FP8 training (Sun et al., 2019). We apply the same idea, and find the best balance of precision and range for both paths separately. Full details for all BM formats are provided in Table 7 (in Appendix A.1) where forward and backward configurations are given by $(e_f, m_f) / (e_b, m_b)$ notation. Our formats cover each precision level between 4 and 8 bits, and are denoted by BM4, BM5, BM6, BM7 and BM8 in our experiments. For example, BM6 $(2,3) / (3,2)$ refers to 6-bit BM training with weight and activation tensors represented by $(2,3)$ and activation gradient tensors represented by $(3,2)$ minifloat formats.
|
| 134 |
+
|
| 135 |
+
# 3.2 TRAINING DETAILS AND GPU SIMULATION
|
| 136 |
+
|
| 137 |
+
BM offers an alternative to standard FP32 for the computationally intensive parts of training, which is typically mapped to general matrix multiplication (GEMM). However, specialised hardware is required to realise its potential gains in speed and energy efficiency. For the purposes of this paper, we simulate the behaviour of BM hardware using GPUs and PyTorch $^{1}$ . Given that dot products are computed exactly via Kulisch accumulators, existing CUDA libraries for GEMM can be used without modification, and all data loss is attributed to quantization only. Figure 3 provides an illustration of the setup for each GEMM in forward and backward paths. In a practical implementation, BM does not require any costly movement or storage of high precision tensors. This is enabled by scalar processors after the GEMM (for FP32 operations, Kulisch to floating-point conversion, block minifloat alignments, quantization etc.) and a weight update scheme that can compute and cache high precision gradients on-chip (Sun et al., 2019). Weight, activation and gradient tensors are quantized to BM numbers with stochastic rounding as described in (Wang et al., 2018). For the software simulation, quantization is applied before each GEMM in forward and backward paths and contributes significant performance overhead compared to standard PyTorch layers. An approximate
|
| 138 |
+
|
| 139 |
+
$5 \times$ slow-down is realised on most networks and datasets, with support for denormal numbers the main implementation bottleneck. The realisation of the same function is comparatively cheap in custom hardware however, and can be fully-pipelined for fast training times.
|
| 140 |
+
|
| 141 |
+
# 4 EXPERIMENTS
|
| 142 |
+
|
| 143 |
+
We evaluated the training accuracy of BM on a subset of image, language and object detection modelling tasks. The entire spectrum of representations were explored on ImageNet (Deng et al., 2009) and CIFAR (Krizhevsky et al., 2009) image recognition benchmarks, with results compared against well-calibrated INT8, FP8 and FP32 baselines. On other tasks, BM8 is compared with an FP32 baseline.
|
| 144 |
+
|
| 145 |
+
Table 2: Final Validation Accuracy (%) on CIFAR datasets for ResNet-18
|
| 146 |
+
|
| 147 |
+
<table><tr><td>Scheme</td><td>CIFAR-10</td><td>CIFAR-100</td></tr><tr><td>FP32 (ours)</td><td>94.9</td><td>77.5</td></tr><tr><td>BM6 (2,3)/(3,2)</td><td>95.1</td><td>77.2</td></tr><tr><td>BM5 (2,2)/(3,1)</td><td>94.7</td><td>76.1</td></tr><tr><td>BM4 (2,1)/(3,0)</td><td>94.2</td><td>73.7</td></tr></table>
|
| 148 |
+
|
| 149 |
+
Table 3: Training Accuracy (%) on CIFAR-10 for VGG16 and log quantization
|
| 150 |
+
|
| 151 |
+
<table><tr><td>CIFAR-10</td><td>FP32</td><td>Log</td><td>∇</td><td>kshift(bits)</td></tr><tr><td>log-5b¹</td><td>94.1</td><td>93.8</td><td>-0.3</td><td>32</td></tr><tr><td>log-BM5 (ours)</td><td>93.8</td><td>93.4</td><td>-0.4</td><td>32</td></tr><tr><td>log-BM4 (ours)</td><td>93.8</td><td>93.1</td><td>-0.7</td><td>16</td></tr></table>
|
| 152 |
+
|
| 153 |
+
$^{1}$ (Miyashita et al., 2016)
|
| 154 |
+
2 results achieved with base $\sqrt{2}$
|
| 155 |
+
|
| 156 |
+
# 4.1 CIFAR-10 AND CIFAR-100
|
| 157 |
+
|
| 158 |
+
We ran CIFAR experiments using SGD with momentum of 0.9 for 200 epochs in batches of 128 images and initial learning rate of 0.1 which is decayed by a factor of 5 at the 60th, 120th and 160th epochs. Table 2 presents results for training ResNet-18 (He et al., 2016) with only small BM6, BM5 and BM4 representations. These offer the highest reduction in memory usage while still reaching very close to the FP32 baseline. For example, 6-bit BM training only records a $0.3\%$ loss in accuracy compared to FP32 on CIFAR-100 while theoretically saving $25\%$ of memory read and write overheads compared to FP8. We also tested logarithmic BM formats on CIFAR-10 and VGG16 network. Log representations arise when $m = 0$ , and require only adds and shifts for multiply-add arithmetic. Our results use the same training parameters as before and are shown in Table 3. We compare against the only previously known result for log training, i.e. log-5b (Miyashita et al., 2016) and achieve similar results with respect to FP32 for 5-bit and 4-bit. BM representations have exponent biases that shift tensor distributions dynamically during training, whereas log-5b defines offset parameters at each layer that are fixed. Allowing biases to vary during training gives BM an advantage, and results in similar validation accuracy with only 4-bit words. This corresponds to approximately half the cost for multiplication in the linear domain (by exponent add and Kulisch shift).
|
| 159 |
+
|
| 160 |
+
Table 4: Top-1 accuracy (%) of reduced precision (RP) training on ImageNet for ResNet-18 models
|
| 161 |
+
|
| 162 |
+
<table><tr><td rowspan="2">Scheme</td><td colspan="5">Numerical representation (e,m)</td><td colspan="2">ResNet-18</td></tr><tr><td>w</td><td>x</td><td>dw</td><td>dx</td><td>acc</td><td>FP32</td><td>RP</td></tr><tr><td>SWALP (Yang et al., 2019)</td><td>81</td><td>81</td><td>81</td><td>81</td><td>321</td><td>70.3</td><td>65.8</td></tr><tr><td>S2FP8 (Cambier et al., 2020)</td><td>(5,2)/(8,23)</td><td>(5,2)</td><td>(5,2)</td><td>(5,2)</td><td>(8,23)</td><td>70.3</td><td>69.6</td></tr><tr><td>HFP8 (Sun et al., 2019)</td><td>(4,3)</td><td>(4,3)</td><td>(5,10)</td><td>(5,2)</td><td>(5,10)</td><td>69.4</td><td>69.4</td></tr><tr><td>BM8 (2,5)/(4,3)</td><td>(2,5)</td><td>(2,5)</td><td>(6,9)</td><td>(4,3)</td><td>311</td><td>69.7</td><td>69.8</td></tr><tr><td>BM7 (2,4)/(4,2)</td><td>(2,4)</td><td>(2,4)</td><td>(6,9)</td><td>(4,2)</td><td>291</td><td>69.7</td><td>69.6</td></tr><tr><td>BM6 (2,3)/(3,2)</td><td>(2,3)</td><td>(2,3)</td><td>(6,9)</td><td>(3,2)</td><td>201</td><td>69.7</td><td>69.0</td></tr><tr><td>BM5 (2,2)/(3,1)</td><td>(2,2)</td><td>(2,2)</td><td>(6,9)</td><td>(3,1)</td><td>181</td><td>69.7</td><td>66.8</td></tr></table>
|
| 163 |
+
|
| 164 |
+
1 Fixed point
|
| 165 |
+
|
| 166 |
+
# 4.2 IMAGENET
|
| 167 |
+
|
| 168 |
+
The ImageNet dataset has 1000 class labels, and consists of 256x256 images split into a training set with 1.28 million images and validation set with 50,000 images. We use ResNet-18 (He et al., 2016) and AlexNet (Krizhevsky et al., 2012) architectures from the official PyTorch implementation $^2$ , and train on one GPU with standard settings; SGD with momentum of 0.9, batches of 256 images, and an initial learning rate of 0.1 (0.01 for AlexNet) which is decayed by a factor of 10 at epoch 30 and 60. ResNet-18 has been widely tested upon in previous work, and offers the most suitable benchmark for exploring the full spectrum of BM representations, especially given the size of the network as well as the cost of BM quantization on training times (approx. $5 \times$ slow-down). Results are presented in Table 4 where columns w, x, dw, dx and acc refer to the numerical representation for weight, activation, weight gradient, activation gradient and on-chip GEMM accumulator. We achieve FP32 equivalent accuracy for BM8 and BM7, slight degradation for BM6, while our BM5 exceeds the reported accuracy for 8-bit SWALP (Yang et al., 2019). Compared to S2FP8 (Cambier et al., 2020), our BM6 representation reaches similar levels of relative accuracy, but with two fewer bits and without a high precision master copy of the weights. We provide some insights into possible reasons for this in Section 4.5 by considering the possibility of diminishing returns in accuracy from scaling minifloat representations. Compared with HFP8 (Sun et al., 2019), which offers robust 8-bit training results, BM8 produces the same accuracy on ImageNet while improving upon HFP8 in hardware density and performance. BM8 tensors can be represented with fewer exponent bits, and thus perform dot products via Kulisch accumulators that are smaller and faster than FP16 units. Furthermore, BM offers tradeoffs for even denser arithmetic and lower memory usage. 
In these regimes, BM hardware is more comparable to SWALP (Yang et al., 2019) which performs the GEMM in fixed point. Proof of BM design efficiencies is provided with RTL synthesis results in Section 5, Figure 6.
|
| 169 |
+
|
| 170 |
+
<table><tr><td>Model (Dataset) [Metric]</td><td>FP32</td><td>BM8</td></tr><tr><td>AlexNet (ImageNet)</td><td>56.0</td><td>56.2</td></tr><tr><td>EfficientNet-b0 (small ImageNet)</td><td>62.6</td><td>61.8</td></tr><tr><td>LSTM (PTB)[Val ppl.]</td><td>84.7</td><td>87.33</td></tr><tr><td>Transformer-base (IWSLT)[BLEU]</td><td>32.3</td><td>31.8</td></tr><tr><td>SSD-Lite (MbNetV2) (VOC)[mAP]</td><td>68.6</td><td>68.0</td></tr></table>
|
| 171 |
+
|
| 172 |
+
Table 5: Baseline FP32 v BM8 training on Image, Language and Object Detection models
|
| 173 |
+
|
| 174 |
+

|
| 175 |
+
Figure 4: Validation perplexity of LSTM model on Penn Treebank
|
| 176 |
+
|
| 177 |
+
# 4.3 LANGUAGE MODELLING WITH LSTM
|
| 178 |
+
|
| 179 |
+
We compared 8-bit formats for language modeling on the Penn Treebank dataset (Marcus et al., 1993). We adapted the 2-layer Long Short Term Memory (LSTM) network from PyTorch Examples and perform all GEMM operations with BM8 arithmetic. The batch size is 20, initial learning rate is 20 with 0.25 decay, the embedding and hidden dimensions are 650 and sequence length is 35. Results in Figure 4 show BM8 with $(2,5)/(4,3)$ hybrid configuration achieving better accuracy than BFP8 and HFP8 variants. The proposed BM8 representation has more fine-grained dynamic range and fewer mantissa bits than BFP8, and more precision and fewer exponent bits than HFP8 formats. This design point achieves better outcomes in terms of accuracy and hardware density than either representation separately (see Figure 6, Section 5). Validation perplexity of 87.33 is also comparable to 84.70 obtained with full-precision floating-point.
|
| 180 |
+
|
| 181 |
+
# 4.4 ADDITIONAL EXPERIMENTS
|
| 182 |
+
|
| 183 |
+
To demonstrate wider applicability of the BM number representation, we tested BM8 on several additional networks and modelling tasks. Results are summarized in Table 5 with full details of each
|
| 184 |
+
|
| 185 |
+
experiment provided in Appendix A.3. Crucially, every network tested achieved comparable accuracy with baseline FP32. This includes EfficientNet-b0 (Tan & Le, 2019) image classification and SSDlite (Liu et al., 2016) with MobileNet-V2 object detection models, both of which represent the type of network and application well suited to resource constrained hardware devices. Furthermore, we also trained a small Transformer network for translation on the IWSLT German to English dataset (Cettolo et al., 2014). In future work, we plan to scale our implementation and demonstrate training with BM representations on larger networks and datasets. Network design with BM is another interesting research direction, since the majority of network architectures have been designed and optimized while assuming an FP32 arithmetic scheme.
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
(a) Validation Accuracy: Training with denormal numbers on ImageNet
|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
(b) HW (left axis) vs Range (right axis): Selecting the block size
|
| 192 |
+
(c) Minifloat scaling by varying the exponent base
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
Figure 5: Experiments for minimising data loss with 6-bit Block Minifloat (BM6)
|
| 196 |
+
|
| 197 |
+
# 4.5 EMPIRICAL ANALYSIS
|
| 198 |
+
|
| 199 |
+
Effect of Denormal Numbers: To study the effect that denormal numbers have on training convergence in sub 8-bit networks, we trained ResNet-18 on ImageNet for BM6 with denormals (ours) and without denormals, using QPyTorch library (Zhang et al., 2019). Results are plotted against floating-point accuracy in Figure 5a. Without denormals, small numbers are flushed-to-zero and training stagnates immediately. Although not shown here, 8-bit representations with more than $e = 3$ bits do not suffer similar accuracy degradation without denormals. This investigation confirms the importance of denormal numbers for training BM formats with fewer exponent bits, and differentiates our software and hardware experiments substantially from previous 8-bit regimes.
|
| 200 |
+
|
| 201 |
+
Selecting the Block Size: We conducted experiments on CIFAR100 to determine suitable block sizes - those which simultaneously increase dynamic range and have low hardware overhead. Results are shown in Figure 5b. We took the average of the largest range observed in gradient tensors at different block settings, over the entire duration of training. Estimates of area come from Equation 7 with parameters; $\alpha_{1} = 1$ , $\alpha_{2} = 10$ (relative area of fixed point and floating-point respectively), $kadd = 21$ and $\alpha_{3} = 0$ . We saturate the area cost at $N = 256$ , which is consistent with the length of dot-products supported by the GEMM architecture in TPU hardware (Jouppi et al., 2017). Finally, $N = 48$ emerged as a good selection, corresponding to one floating-point unit every 48 multiply-accumulate operations and one 8-bit exponent bias every 2304 minifloat numbers.
|
| 202 |
+
|
| 203 |
+
Scaling the Minifloat Representation: In Figure 2, which was discussed previously, minifloats have exponent biases that shift the representation to align with the maximum of the underlying value distribution. Additionally, the minifloat representation could be scaled (or stretched) over a wider or narrower part of the value distribution. We investigate this effect by varying the base of the exponent, and inspecting the underflow and root mean square error (rmse) of dot-products after quantization; results are shown in Figure 5c. The tensor under test is a gradient tensor with maximum exponent of -17 and mean exponent of -21. Mean scaling was proposed in S2FP8 (Cambier et al., 2020) for 8-bit training and works by centering the minifloat over the mean of the exponent value distribution. For the (3,2) format, mean scaling requires a base of 2.52, calculated as $b = 2^{\frac{-17 + 21}{7 - 4}}$ (i.e. $2^{4/3} \approx 2.52$). This is akin to redirecting precision from high value regions into smaller underflow regions, the result of which observably leads to increased error in the tested 6-bit regime. Better approaches could be designed to detect underflow and use higher precision arithmetic where necessary.
|
| 204 |
+
|
| 205 |
+
<table><tr><td>Component</td><td>Area (μm2)</td><td>Power (μW)</td></tr><tr><td>FP32</td><td>4782</td><td>10051</td></tr><tr><td>FP8 (w/ FP16 add)</td><td>829</td><td>1429</td></tr><tr><td>INT8 (w/ INT32 add)</td><td>417</td><td>1269</td></tr><tr><td>BM8</td><td>391</td><td>1141</td></tr><tr><td>BM6</td><td>200</td><td>624</td></tr><tr><td>INT8 (4x4 systolic)</td><td>7005</td><td>20253</td></tr><tr><td>FP8 (4x4 systolic)</td><td>18201</td><td>56202</td></tr><tr><td>BM8 (4x4 systolic)</td><td>6976</td><td>18765</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 6: Logic area and power of single-cycle fused multiply-Add (FMA) and 4x4 array multipliers. Synthesized at ${750}\mathrm{{MHz}}$ with Cadence RTL Compiler 14.11 and ${28}\mathrm{\;{nm}}$ cell library
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
Figure 6: Computational density v ResNet-18 accuracy on ImageNet
|
| 211 |
+
|
| 212 |
+
# 5 HARDWARE EVALUATION
|
| 213 |
+
|
| 214 |
+
In this section, we evaluate the proposed block minifloat representation in hardware and compare against competitive integer and floating-point arithmetic. Figure 6 summarizes our results with a plot of computational density (measured as operations per unit silicon area) and ResNet-18 training accuracy on ImageNet. Computational density was obtained from an RTL design of single-cycle fused multiply-add (FMA) units and 4x4 systolic array multipliers. We performed synthesis at $750\mathrm{MHz}$ for $28\mathrm{nm}$ silicon technology and recorded area and power measurements for each number representation. Table 6 provides a subset of these results, with coverage of all BM formats supplied in Appendix A.4.
|
| 215 |
+
|
| 216 |
+
In summary, BM8 and BM6 arithmetic units are $2.1 \times (12.2 \times)$ and $4.1 \times (23.9 \times)$ smaller and consume $1.25 \times (8.8 \times)$ and $2.3 \times (16.1 \times)$ less power than competitive FP8/(FP32) representations. Such arithmetic, which has similar hardware complexity to INT8, may be especially useful in embedded applications where there are stricter area and power constraints but training still needs to achieve normal levels of accuracy and relatively high performance. With high computational density, BM arithmetic can achieve higher training throughput on compute intensive problems, while sub 8-bit BM formats have lower bandwidth requirements leading to faster training times in memory bound applications. Finally, overheads related to conversion from Kulisch to floating-point and BM quantization are expected to contribute little logic area relative to GEMM. This includes modules for leading-one detection, barrel shifter, maximum exponent calculation, pre-quantization buffering and stochastic rounding, each of which have an efficient implementation. Further support of these claims and other system-level effects are the subject of future work.
|
| 217 |
+
|
| 218 |
+
# 6 CONCLUSION
|
| 219 |
+
|
| 220 |
+
A new representation called Block Minifloat (BM) was presented for training DNNs effectively with reduced precision. Our representation allows the implicit exponent bias within IEEE-754 floating-point specifications to vary for a block of numbers, and can be trained with high accuracy using narrow exponent encodings. We describe how few exponent bits lead to significantly smaller hardware, while smaller representations reduce memory bandwidth requirements, leading to faster training than previous 8-bit approaches.
|
| 221 |
+
|
| 222 |
+
# REFERENCES
|
| 223 |
+
|
| 224 |
+
Ron Banner, Itay Hubara, Elad Hoffer, and Daniel Soudry. Scalable methods for 8-bit training of neural networks. In Advances in neural information processing systems, pp. 5145-5153, 2018.
|
| 225 |
+
Leopold Cambier, Anahita Bhiwandiwalla, Ting Gong, Mehran Nekuii, Oguz H Elibol, and Hanlin Tang. Shifted and squeezed 8-bit floating point format for low-precision training of deep neural networks. arXiv preprint arXiv:2001.05674, 2020.
|
| 226 |
+
Mauro Cettolo, Jan Niehues, Sebastian Stüker, Luisa Bentivogli, and Marcello Federico. Report on the 11th iwslt evaluation campaign, iwslt 2014. In Proceedings of the International Workshop on Spoken Language Translation, Hanoi, Vietnam, volume 57, 2014.
|
| 227 |
+
William J Dally. High performance hardware for machine learning. https://media.nips.cc/Conferences/2015/tutorialslides/Dally-NIPS-Tutorial-2015.pdf, 2015. Accessed: 2020-05-16.
|
| 228 |
+
Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.
|
| 229 |
+
Mario Drumond, LIN Tao, Martin Jaggi, and Babak Falsafi. Training DNNs with hybrid block floating point. In Advances in Neural Information Processing Systems, pp. 453-463, 2018.
|
| 230 |
+
John L Gustafson and Isaac T Yonemoto. Beating floating point at its own game: Posit arithmetic. Supercomputing Frontiers and Innovations, 4(2):71-86, 2017.
|
| 231 |
+
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
|
| 232 |
+
Itay Hubara, Matthieu Courbariaux, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Quantized neural networks: Training neural networks with low precision weights and activations. The Journal of Machine Learning Research, 18(1):6869-6898, 2017.
|
| 233 |
+
IEEE-754. IEEE 754-2019, Standard for Floating-Point Arithmetic. June 2019. ISBN 1-5044-5925-3 (print), 1-5044-5924-5 (e-PDF). doi: https://doi.org/10.1109/IEEESTD.2019.8766229.
|
| 234 |
+
Jeff Johnson. Rethinking floating point for deep learning. arXiv preprint arXiv:1811.01721, 2018.
|
| 235 |
+
Norman P Jouppi, Cliff Young, Nishant Patil, David Patterson, Gaurav Agrawal, Raminder Bajwa, Sarah Bates, Suresh Bhatia, Nan Boden, Al Borchers, et al. In-datacenter performance analysis of a tensor processing unit. In Proceedings of the 44th Annual International Symposium on Computer Architecture, pp. 1-12, 2017.
|
| 236 |
+
Dhiraj D. Kalamkar, Dheevatsa Mudigere, Naveen Mellempudi, Dipankar Das, Kunal Banerjee, Sasikanth Avancha, Dharma Teja Vooturi, Nataraj Jammalamadaka, Jianyu Huang, Hector Yuen, Jiyan Yang, Jongsoo Park, Alexander Heinecke, Evangelos Georganas, Sudarshan Srinivasan, Abhisek Kundu, Misha Smelyanskiy, Bharat Kaul, and Pradeep Dubey. A study of BFLOAT16 for deep learning training. CoRR, abs/1905.12322, 2019. URL http://arxiv.org/abs/1905.12322.
|
| 237 |
+
Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
|
| 238 |
+
Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pp. 1097-1105, 2012.
|
| 239 |
+
Ulrich W Kulisch and Willard L Miranker. Computer arithmetic in theory and practice. Academic press, 2014.
|
| 240 |
+
Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C Berg. Ssd: Single shot multibox detector. In European conference on computer vision, pp. 21-37. Springer, 2016.
|
| 241 |
+
|
| 242 |
+
Mitchell P. Marcus, Mary Ann Marcinkiewicz, and Beatrice Santorini. Building a large annotated corpus of english: The penn treebank. Comput. Linguist., 19(2):313-330, June 1993. ISSN 0891-2017.
|
| 243 |
+
Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, et al. Mixed precision training. arXiv preprint arXiv:1710.03740, 2017.
|
| 244 |
+
Daisuke Miyashita, Edward H Lee, and Boris Murmann. Convolutional neural networks using logarithmic data representation. arXiv preprint arXiv:1603.01025, 2016.
|
| 245 |
+
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. Mobilenetv2: Inverted residuals and linear bottlenecks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4510-4520, 2018.
|
| 246 |
+
Xiao Sun, Jungwook Choi, Chia-Yu Chen, Naigang Wang, Swagath Venkataramani, Vijayalakshmi Viji Srinivasan, Xiaodong Cui, Wei Zhang, and Kailash Gopalakrishnan. Hybrid 8-bit floating point (hfp8) training and inference for deep neural networks. In Advances in Neural Information Processing Systems, pp. 4901-4910, 2019.
|
| 247 |
+
Mingxing Tan and Quoc V Le. Efficientnet: Rethinking model scaling for convolutional neural networks. arXiv preprint arXiv:1905.11946, 2019.
|
| 248 |
+
Naigang Wang, Jungwook Choi, Daniel Brand, Chia-Yu Chen, and Kailash Gopalakrishnan. Training deep neural networks with 8-bit floating point numbers. In Advances in neural information processing systems, pp. 7675-7684, 2018.
|
| 249 |
+
Shuang Wu, Guoqi Li, Feng Chen, and Luping Shi. Training and inference with integers in deep neural networks. CoRR, abs/1802.04680, 2018. URL http://arxiv.org/abs/1802.04680.
|
| 250 |
+
Guandao Yang, Tianyi Zhang, Polina Kirichenko, Junwen Bai, Andrew Gordon Wilson, and Christopher De Sa. SWALP: Stochastic Weight Averaging in Low-Precision Training. arXiv preprint arXiv:1904.11943, 2019.
|
| 251 |
+
Tianyi Zhang, Zhiqiu Lin, Guandao Yang, and Christopher De Sa. Qpytorch: A low-precision arithmetic simulation framework. arXiv preprint arXiv:1910.04540, 2019.
|
| 252 |
+
Shuchang Zhou, Yuxin Wu, Zekun Ni, Xinyu Zhou, He Wen, and Yuheng Zou. Dorefa-net: Training low bitwidth convolutional neural networks with low bitwidth gradients. arXiv preprint arXiv:1606.06160, 2016.
|
| 253 |
+
|
| 254 |
+
# A APPENDIX
|
| 255 |
+
|
| 256 |
+
# A.1 BLOCK MINIFLOAT NUMBER FORMATS
|
| 257 |
+
|
| 258 |
+
The full spectrum of Block Minifloat (BM) formats are presented in Table 7. BM formats are designed with consideration for the hardware cost of the Kulisch accumulator and also dynamic range and precision requirements.
|
| 259 |
+
|
| 260 |
+
Table 7: Comparison of Block Minifloat number formats
|
| 261 |
+
|
| 262 |
+
<table><tr><td>Scheme</td><td>Format1(e, m)</td><td>Range2(dB)</td><td>Precision3(ε)</td><td>Kulisch kadd</td><td>Acc. kshift</td></tr><tr><td>FP32</td><td>(8,23)</td><td>1668</td><td>2-24</td><td>561</td><td>512</td></tr><tr><td>Bfloat16 (Kalamkar et al., 2019)</td><td>(8,7)</td><td>1529</td><td>2-8</td><td>529</td><td>512</td></tr><tr><td>FP16</td><td>(5,10)</td><td>241</td><td>2-11</td><td>87</td><td>64</td></tr><tr><td>FP8 (Wang et al., 2018)</td><td>(5,2)</td><td>185</td><td>2-3</td><td>71</td><td>64</td></tr><tr><td>S2FP8 (Cambier et al., 2020)</td><td>(5,2)</td><td>185</td><td>2-3</td><td>71</td><td>64</td></tr><tr><td>HFP8 (Sun et al., 2019)</td><td>(4,3)/(5,2)</td><td>108/185</td><td>2-4/2-3</td><td>56</td><td>48</td></tr><tr><td>SWALP (Yang et al., 2019)</td><td>(0,7)</td><td>42.1</td><td>2-8</td><td>32</td><td>-</td></tr><tr><td>INT8 (Wu et al., 2018)</td><td>(0,7)</td><td>42.1</td><td>2-8</td><td>32</td><td>-</td></tr><tr><td>BM8 (ours)</td><td>(2,5)/(4,3)</td><td>48.0/108</td><td>2-6/2-4</td><td>31</td><td>20</td></tr><tr><td>BM7 (ours)</td><td>(2,4)/(4,2)</td><td>41.9/101</td><td>2-5/2-3</td><td>29</td><td>20</td></tr><tr><td>BM6 (ours)</td><td>(2,3)/(3,2)</td><td>35.6/53.0</td><td>2-4/2-3</td><td>20</td><td>12</td></tr><tr><td>BM5 (ours)</td><td>(2,2)/(3,1)</td><td>28.9/45.7</td><td>2-3/2-2</td><td>18</td><td>12</td></tr><tr><td>BM5-log (ours)</td><td>(4,0)/(4,0)</td><td>28.9/45.7</td><td>2-3/2-2</td><td>33</td><td>32</td></tr><tr><td>BM4 (ours)</td><td>(2,1)/(3,0)</td><td>21.6/42.1</td><td>2-3/2-1</td><td>16</td><td>12</td></tr><tr><td>BM4-log (ours)</td><td>(3,0)/(3,0)</td><td>28.9/45.7</td><td>2-3/2-2</td><td>17</td><td>16</td></tr></table>
|
| 263 |
+
|
| 264 |
+
<sup>1</sup> hybrid formats, i.e. forward/backward
|
| 265 |
+
2 dynamic range in decibels $20\log_{10}(X_{max}^{+} / X_{min}^{+})$
|
| 266 |
+
3 relative round-off error, i.e. $2^{-m} \times 2^{-1}$
|
| 267 |
+
|
| 268 |
+
# A.2 SOFTWARE IMPLEMENTATION DETAILS
|
| 269 |
+
|
| 270 |
+
Block Minifloat (BM) arithmetic requires custom hardware to achieve gains in speed and energy efficiency. We use QPyTorch (Zhang et al., 2019), an open source framework for low-precision training, to simulate the behaviour of BM hardware with existing PyTorch and CUDA libraries. QPyTorch provides a simple interface for applying quantization in the forward path (weight and activation tensors) and backward path (error, gradient and momentum tensors), ensuring that all numbers have a low-precision representation, while the actual GEMM and AXPY operations are computed in single-precision floating-point (FP32). This last point means that QPyTorch can not ordinarily be used to research low-precision accumulation strategies. Our work is different. Kulisch accumulators (as described in Section 2.3) compute exact dot-products, and therefore our GEMMs are adequately approximated by FP32 arithmetic (which is close to exact). QPyTorch is available online<sup>4</sup> and supports floating-point (without denormals), fixed point and block floating point number formats. Our code implementation<sup>5</sup> is an extension of this package.
|
| 271 |
+
|
| 272 |
+
# A.3 MODEL DETAILS AND ADDITIONAL EXPERIMENTS
|
| 273 |
+
|
| 274 |
+
# A.3.1 BLOCK MINIFLOAT CONVERGENCE CURVES ON IMAGENET
|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
Figure 7: Train loss and top-1 validation accuracy for the full spectrum of Block Minifloat formats trained on ImageNet using a ResNet-18 model
|
| 278 |
+
|
| 279 |
+

|
| 280 |
+
|
| 281 |
+
# A.3.2 COMPARISON WITH BLOCK FLOATING POINT (BFP) ON IMAGENET
|
| 282 |
+
|
| 283 |
+
As described in Section 1, block minifloats bridge the gap between narrow floating-point and block floating point (BFP) representations. The main idea is that better outcomes in terms of accuracy and hardware efficiency can be achieved by exploring the spectrum between the two representations. While BFP ensures that the majority of computation involves dense integer arithmetic, the lack of fine-grained dynamic range typically leads to accuracy loss on larger models and more complex datasets. In Table 8 (below), we show that BM recovers accuracy loss for 6-bit and 8-bit formats on ImageNet while maintaining the same advantages in hardware.
|
| 284 |
+
|
| 285 |
+
Table 8: Comparison of Block Minifloat (BM) and Block Floating Point (BFP) number formats trained on ImageNet with ResNet-18 model.
|
| 286 |
+
|
| 287 |
+
<table><tr><td>Scheme</td><td>BFP (ours)</td><td>BM (ours)</td><td>Δ</td></tr><tr><td>6-bit</td><td>67.0</td><td>69.0</td><td>+2.0</td></tr><tr><td>8-bit</td><td>69.2</td><td>69.8</td><td>+0.6</td></tr></table>
|
| 288 |
+
|
| 289 |
+
# A.3.3 TRANSFORMER MODEL (WMT)
|
| 290 |
+
|
| 291 |
+
We trained the Transformer Base model from the FairSeq repository on the IWSLT'14 German to English translation task. We used Adam optimizer and modified the FairSeq implementation with BM8 quantization. We used default training parameters found in the repository and trained for 25 epochs. BLEU scores were calculated using the script from the repository and show similar convergence between BM8 and FP32 models.
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
Figure 8: Training convergence curves for Transformer on IWSLT'14 DE-En dataset
|
| 295 |
+
|
| 296 |
+
# A.3.4 SSD-LITE (MOBILENET-V2) (VOC)
|
| 297 |
+
|
| 298 |
+
We adapted a PyTorch implementation of SSD-lite from an online repository $^{7}$ . The base network is MobileNet-V2 (Sandler et al., 2018) which was pretrained on ImageNet. The entire network is trained on VOC2012 and VOC2007 trainval datasets and evaluated on VOC2007 validation dataset. We apply BM8 quantization to all weights, activations and gradients before GEMM computations in the forward and backward paths. The network was trained with default parameter settings provided in the repository as follows: SGD with momentum of 0.9, weight decay factor 0.0005, batches of 32 images, and cosine annealing ( $t_{max} = 200$ ) with an initial learning rate of 0.01. After 200 epochs, BM8 achieves a mAP of 68.0 which is sufficiently close to the reported accuracy of 68.6.
|
| 299 |
+
|
| 300 |
+
# A.3.5 EFFICIENTNET-B0 (IMAGENET)
|
| 301 |
+
|
| 302 |
+
We adapted a PyTorch implementation of EfficientNet (Tan & Le, 2019) from an online repository $^{8}$ . We trained the smallest EfficientNet-b0 network on a reduced-size ImageNet dataset, where the images are resized from 256x256 to 128x128. This choice was made to accelerate the training time, which is slowed down by $5 \times$ with our BM8 quantization function. The network is trained on one GPU for only 60 epochs using batch size 256 and an initial learning rate of 0.1 which is decayed exponentially with gamma of 0.90387. Figure 9 shows convergence of BM8 with an FP32 baseline.
|
| 303 |
+
|
| 304 |
+

|
| 305 |
+
Figure 9: Training convergence curves for EfficientNet-b0 on ImageNet
|
| 306 |
+
|
| 307 |
+
# A.4 HARDWARE SYNTHESIS
|
| 308 |
+
|
| 309 |
+
Fused multiply-add (FMA) units were designed in RTL for floating-point and block minifloat representations. We modified code from the Deepfloat $^9$ repository for FP32, FP16 and FP8 units. The BM units with Kulisch accumulation were hand written in Verilog following the block design given
|
| 310 |
+
|
| 311 |
+
in Figure 10. All designs were synthesised at $750\mathrm{MHz}$ using Cadence RTL compiler 14.11 and a commercial $28\mathrm{nm}$ standard cell library. Since GEMM hardware is typically designed from tiles of smaller computational units, we also provide synthesis results for small $4\times 4$ systolic array multipliers. Full coverage of our results are shown in Table 9 and Table 10. Table 11 is also provided, and shows the component breakdown and scaling of Kulisch related costs in different 8-bit BM regimes. Given that BM relies on hybrid formats, the multiplier operands are both sized for the largest mantissa plus one bit for denormal support. Compared to $(4,3)/(5,2)$ , which is the format used in HFP8 (Sun et al., 2019), BM8 $(2,5)/(4,3)$ is $1.6\times$ smaller. This is because BM8 has narrower exponent encodings that reduces the width of the Kulisch accumulator. Finally, the overhead for converting from Kulisch to FP32 is relatively small. We synthesised this to only contribute $264\mu m^2$ in area for 32-bit accumulators, the cost of which is amortized over the length of the dot product.
|
| 312 |
+
|
| 313 |
+

|
| 314 |
+
Figure 10: Block diagram of block minifloat multiply-add; $\mathrm{A}^{*}\mathrm{B} + \mathrm{C}$ , where A and B are minifloats and C is an integer
|
| 315 |
+
|
| 316 |
+
Table 9: Synthesized logic area and power of single-cycle fused multiply-Add (FMA) at ${750}\mathrm{{MHz}}$ on ${28}\mathrm{\;{nm}}$ chip.
|
| 317 |
+
|
| 318 |
+
<table><tr><td>Component</td><td>Area μm2</td><td>Power μW</td></tr><tr><td>FP32</td><td>4782</td><td>10051</td></tr><tr><td>FP16</td><td>1116</td><td>2120</td></tr><tr><td>FP8 (w/ FP16 add)</td><td>829</td><td>1429</td></tr><tr><td>INT8 (w/ INT32 add)</td><td>417</td><td>1269</td></tr><tr><td>BM8</td><td>391</td><td>1141</td></tr><tr><td>BM7</td><td>280</td><td>840</td></tr><tr><td>BM6</td><td>200</td><td>624</td></tr><tr><td>BM5</td><td>171</td><td>546</td></tr><tr><td>BM5-log</td><td>231</td><td>801</td></tr><tr><td>BM4</td><td>115</td><td>361</td></tr><tr><td>BM4-log</td><td>120</td><td>426</td></tr></table>
|
| 319 |
+
|
| 320 |
+
Table 10: Synthesized logic area and power of $4 \times 4$ systolic array multipliers at $750 \mathrm{MHz}$ on $28 \mathrm{~nm}$ chip.
|
| 321 |
+
|
| 322 |
+
<table><tr><td>Component</td><td>Area μm2</td><td>Power μW</td></tr><tr><td>FP8 (w/ FP16 add)</td><td>18201</td><td>56202</td></tr><tr><td>INT8 (w/ INT32 add)</td><td>7005</td><td>20253</td></tr><tr><td>BM8</td><td>6976</td><td>18765</td></tr><tr><td>BM6</td><td>4083</td><td>11959</td></tr></table>
|
| 323 |
+
|
| 324 |
+
Table 11: Component breakdown and logic area for different 8-bit BM formats
|
| 325 |
+
|
| 326 |
+
<table><tr><td rowspan="2">Format (e,m)/(e,m)</td><td colspan="3">Details</td><td colspan="4">Area (μm2)</td></tr><tr><td>multiply</td><td>kadd</td><td>kshift</td><td>comb.1</td><td>kadd</td><td>kshift</td><td>total</td></tr><tr><td>(3,4)/(4,3)</td><td>(5b × 5b)</td><td>34</td><td>24</td><td>210</td><td>93</td><td>74</td><td>377</td></tr><tr><td>(2,5)/(4,3)</td><td>(6b × 6b)</td><td>31</td><td>20</td><td>235</td><td>79</td><td>77</td><td>391</td></tr><tr><td>(3,4)/(5,2)</td><td>(5b × 5b)</td><td>49</td><td>40</td><td>253</td><td>199</td><td>95</td><td>547</td></tr><tr><td>(4,3)/(5,2)</td><td>(4b × 4b)</td><td>56</td><td>48</td><td>259</td><td>276</td><td>104</td><td>639</td></tr><tr><td>(5,2)/(5,2)</td><td>(3b × 3b)</td><td>71</td><td>64</td><td>300</td><td>361</td><td>113</td><td>774</td></tr><tr><td>(0,7)/(0,7)</td><td>(8b × 8b)</td><td>32</td><td>NA</td><td>NA</td><td>NA</td><td>NA</td><td>418</td></tr></table>
|
| 327 |
+
|
| 328 |
+
${}^{1}$ combinational logic includes multiply component
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:84c5036c24f75f8872f7b086af31de2beb523f697f7debab2303aa432e1dd00c
|
| 3 |
+
size 656925
|
ablockminifloatrepresentationfortrainingdeepneuralnetworks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a636050fb207697f05d506577a561fe5481f0cbd3141b165a1ce2d390016adce
|
| 3 |
+
size 447249
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:389adccebd0a1c87b83329d7ce02875d0fecef0ba6e6d3f96b86b1a6768a6165
|
| 3 |
+
size 236366
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:50978dfaf0a458b495d1412d14243519ecd266fc4149d18cbe1394417affc78c
|
| 3 |
+
size 269131
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/d9dd3a22-cf24-49a7-9315-1623e966a68f_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c1d248791c5e261a0bb214e709cbba13fea97685bd06c737bf5c1fa27d216dd6
|
| 3 |
+
size 2411648
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d71b4a54a9f193a20a8357c76ebbb7aaee35b736632b473a5afc55ba76bfbd8b
|
| 3 |
+
size 1752416
|
acceleratingconvergenceofreplicaexchangestochasticgradientmcmcviavariancereduction/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b36a4ab8d8add4192120e0d29da8fe20a152db687d6075385839033d6a9624b4
|
| 3 |
+
size 1443953
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e2ad445060599f0cf560d985d0dd13de2841657ddc4f600f743e8f906be6ea8
|
| 3 |
+
size 157669
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:af72913ab062a6cd28f6de08db751927b2e6d72d6884960f0af0fbf7c4d1d05a
|
| 3 |
+
size 185420
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/91d90628-ce28-4dff-8086-c85b0048d45c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5ad0dcfcc5f5ebbf4221579f101d6190ce00d9686ca966988a94656c3e8c646c
|
| 3 |
+
size 1781536
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c579797fe351a15c4d9a09a252182ec7c069854d3c82d352f89184ed6e3fddb0
|
| 3 |
+
size 880888
|
accuratelearningofgraphrepresentationswithgraphmultisetpooling/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d991e4be0d80425d9bef229d653e9e2aaf01e557fd6ccf4c0b6339add37eeb76
|
| 3 |
+
size 822208
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:aee4849fce9f4fa7ae109c74ba4dcafc3f63d39e15d96f23771ab48ec144d352
|
| 3 |
+
size 156710
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bbe1496e31ea8dec1fa94f6f3f6fdcec8644966109d30777f228b95b37684085
|
| 3 |
+
size 180012
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/24ddb598-0a37-419b-abfd-9ac900132497_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c2227855d14d91eb23030e2a034ab5753db97bb5df53543eeabba183caa3e667
|
| 3 |
+
size 872390
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/full.md
ADDED
|
@@ -0,0 +1,622 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ACHIEVING LINEAR SPEEDUP WITH PARTIAL WORKER PARTICIPATION IN NON-IID FEDERATED LEARNING
|
| 2 |
+
|
| 3 |
+
Haibo Yang, Minghong Fang, and Jia Liu
|
| 4 |
+
|
| 5 |
+
Department of Electrical and Computer Engineering
|
| 6 |
+
|
| 7 |
+
The Ohio State University
|
| 8 |
+
|
| 9 |
+
Columbus, OH 43210 USA
|
| 10 |
+
|
| 11 |
+
{yang.5952,fang.841,liu.1736}@osu.edu
|
| 12 |
+
|
| 13 |
+
# ABSTRACT
|
| 14 |
+
|
| 15 |
+
Federated learning (FL) is a distributed machine learning architecture that leverages a large number of workers to jointly learn a model with decentralized data. FL has received increasing attention in recent years thanks to its data privacy protection, communication efficiency and a linear speedup for convergence in training (i.e., convergence performance increases linearly with respect to the number of workers). However, existing studies on linear speedup for convergence are only limited to the assumptions of i.i.d. datasets across workers and/or full worker participation, both of which rarely hold in practice. So far, it remains an open question whether or not the linear speedup for convergence is achievable under non-i.i.d. datasets with partial worker participation in FL. In this paper, we show that the answer is affirmative. Specifically, we show that the federated averaging (FedAvg) algorithm (with two-sided learning rates) on non-i.i.d. datasets in non-convex settings achieves a convergence rate $\mathcal{O}\left(\frac{1}{\sqrt{mKT}} + \frac{1}{T}\right)$ for full worker participation and a convergence rate $\mathcal{O}\left(\frac{\sqrt{K}}{\sqrt{nT}} + \frac{1}{T}\right)$ for partial worker participation, where $K$ is the number of local steps, $T$ is the number of total communication rounds, $m$ is the total worker number and $n$ is the worker number in one communication round for partial worker participation. Our results also reveal that the local steps in FL could help the convergence and show that the maximum number of local steps can be improved to $T/m$ in full worker participation. We conduct extensive experiments on MNIST and CIFAR-10 to verify our theoretical results.
|
| 16 |
+
|
| 17 |
+
# 1 INTRODUCTION
|
| 18 |
+
|
| 19 |
+
Federated Learning (FL) is a distributed machine learning paradigm that leverages a large number of workers to collaboratively learn a model with decentralized data under the coordination of a centralized server. Formally, the goal of FL is to solve an optimization problem, which can be decomposed as:
|
| 20 |
+
|
| 21 |
+
$$
|
| 22 |
+
\min _ {x \in \mathbb {R} ^ {d}} f (x) := \frac {1}{m} \sum_ {i = 1} ^ {m} F _ {i} (x),
|
| 23 |
+
$$
|
| 24 |
+
|
| 25 |
+
where $F_{i}(x)\triangleq \mathbb{E}_{\xi_{i}\sim D_{i}}[F_{i}(x,\xi_{i})]$ is the local (non-convex) loss function associated with a local data distribution $D_{i}$ and $m$ is the number of workers. FL allows a large number of workers (such as edge devices) to participate flexibly without sharing data, which helps protect data privacy. However, it also introduces two unique challenges unseen in traditional distributed learning algorithms that are used typically for large data centers:
|
| 26 |
+
|
| 27 |
+
- Non-independent-identically-distributed (non-i.i.d.) datasets across workers (data heterogeneity): In conventional distributed learning in data centers, the distribution for each worker's local dataset can usually be assumed to be i.i.d., i.e., $D_{i} = D, \forall i \in \{1, \dots, m\}$ . Unfortunately, this assumption rarely holds for FL since data are generated locally at the workers based on their circumstances, i.e., $D_{i} \neq D_{j}$ , for $i \neq j$ . It will be seen later that the non-i.i.d assumption imposes significant challenges in algorithm design for FL and their performance analysis.
|
| 28 |
+
|
| 29 |
+
- Time-varying partial worker participation (systems non-stationarity): With the flexibility for workers' participation in many scenarios (particularly in mobile edge computing), workers may randomly join or leave the FL system at will, thus rendering the active worker set stochastic and time-varying across communication rounds. Hence, it is often infeasible to wait for all workers' responses as in traditional distributed learning, since inactive workers or stragglers will significantly slow down the whole training process. As a result, only a subset of the workers may be chosen by the server in each communication round, i.e., partial worker participation.
|
| 30 |
+
|
| 31 |
+
In recent years, the Federated Averaging method (FedAvg) and its variants (McMahan et al., 2016; Li et al., 2018; Hsu et al., 2019; Karimireddy et al., 2019; Wang et al., 2019a) have emerged as a prevailing approach for FL. Similar to the traditional distributed learning, FedAvg leverages local computation at each worker and employs a centralized parameter server to aggregate and update the model parameters. The unique feature of FedAvg is that each worker runs multiple local stochastic gradient descent (SGD) steps rather than just one step as in traditional distributed learning between two consecutive communication rounds. For i.i.d. datasets and the full worker participation setting, Stich (2018) and Yu et al. (2019b) proposed two variants of FedAvg that achieve a convergence rate of $\mathcal{O}\left(\frac{mK}{T} +\frac{1}{\sqrt{mKT}}\right)$ with a bounded gradient assumption for both strongly convex and nonconvex problems, where $m$ is the number of workers, $K$ is the local update steps, and $T$ is the total communication rounds. Wang & Joshi (2018) and Stich & Karimireddy (2019) further proposed improved FedAvg algorithms to achieve an $\mathcal{O}\left(\frac{m}{T} +\frac{1}{\sqrt{mKT}}\right)$ convergence rate without bounded gradient assumption. Notably, for a sufficiently large $T$ , the above rates become $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)^1$ , which implies a linear speedup with respect to the number of workers. This linear speedup is highly desirable for an FL algorithm because the algorithm is able to effectively leverage the massive parallelism in a large FL system. However, with non-i.i.d. datasets and partial worker participation in FL, a fundamental open question arises: Can we still achieve the same linear speedup for convergence, i.e., $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)$ , with non-i.i.d. datasets and under either full or partial worker participation?
|
| 32 |
+
|
| 33 |
+
In this paper, we show the answer to the above question is affirmative. Specifically, we show that a generalized FedAvg with two-sided learning rates achieves linear convergence speedup with non-i.i.d. datasets and under full/partial worker participation. We highlight our contributions as follows:
|
| 34 |
+
|
| 35 |
+
- For non-convex problems, we show that the convergence rate of the FedAvg algorithm on non-i.i.d. dataset are $\mathcal{O}\left(\frac{1}{\sqrt{mKT}} + \frac{1}{T}\right)$ and $\mathcal{O}\left(\frac{\sqrt{K}}{\sqrt{nT}} + \frac{1}{T}\right)$ for full and partial worker participation, respectively, where $n$ is the size of the partially participating worker set. This indicates that our proposed algorithm achieves a linear speedup for convergence rate for a sufficiently large $T$ . When reduced to the i.i.d. case, our convergence rate is $\mathcal{O}\left(\frac{1}{TK} + \frac{1}{\sqrt{mKT}}\right)$ , which is also better than previous works. We summarize the convergence rate comparisons for both i.i.d. and non-i.i.d. cases in Table 1. It is worth noting that our proof does not require the bounded gradient assumption. We note that the SCAFFOLD algorithm (Karimireddy et al., 2019) also achieves the linear speedup but extra variance reduction operations are required, which lead to higher communication costs and implementation complexity. By contrast, we do not have such extra requirements in this paper.
|
| 36 |
+
- In order to achieve a linear speedup, i.e., a convergence rate $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)$ , we show that the number of local updates $K$ can be as large as $T / m$ , which improves the $T^{1/3} / m$ result previously shown in Yu et al. (2019a) and Karimireddy et al. (2019). As shown later in the communication complexity comparison in Table 1, a larger number of local steps implies relatively fewer communication rounds, thus less communication overhead. Interestingly, our results also indicate that the number of local updates $K$ does not hurt but rather helps the convergence with a proper choice of learning rates under full worker participation. This overcomes the limitation as suggested in Li et al. (2019b) that local SGD steps might slow down the convergence $(\mathcal{O}\left(\frac{K}{T}\right)$ for the strongly convex case). This result also reveals new insights on the relationship between the number of local steps and the learning rate.
|
| 37 |
+
|
| 38 |
+
Table 1: Convergence rates of optimization methods for FL.
|
| 39 |
+
|
| 40 |
+
<table><tr><td>Dataset</td><td>Algorithm6</td><td>Convexity7</td><td>Partial Worker</td><td>Convergence Rate</td><td>Communication complexity</td></tr><tr><td rowspan="5">IID</td><td>Stich1</td><td>SC</td><td>×</td><td>O(mK/T + 1/√mKT)</td><td>O(mK/ε + 1/mKε2)</td></tr><tr><td>Yu1</td><td>NC</td><td>×</td><td>O(mK/T + 1/√mKT)</td><td>O(mK/ε + 1/mKε2)</td></tr><tr><td>Wang</td><td>NC</td><td>×</td><td>O(m/T + 1/√mKT)</td><td>O(m/ε + 1/mKε2)</td></tr><tr><td>Stich2</td><td>NC</td><td>×</td><td>O(m/T + 1/√mKT)</td><td>O(m/ε + 1/mKε2)</td></tr><tr><td>This paper</td><td>NC</td><td>✓</td><td>O(1/KT + 1/√mKT)</td><td>O(1/Kε + 1/mKε2)</td></tr><tr><td rowspan="6">NON-IID</td><td>Khaled1</td><td>C</td><td>×</td><td>O(m/T + 1/√mT)</td><td>O(m/ε + 1/mKε2)</td></tr><tr><td>Yu22</td><td>NC</td><td>×</td><td>O(m/KT + 1/√mKT)</td><td>O(m/Kε + 1/mKε2)</td></tr><tr><td>Li</td><td>SC</td><td>✓</td><td>O(K/T)</td><td>O(K/ε)</td></tr><tr><td>Karimireddy3</td><td>NC</td><td>✓</td><td>O(1/T2/3 + M/√SKT)</td><td>O(1/ε3/2 + M/SKε2)</td></tr><tr><td>Karimireddy4</td><td>NC</td><td>✓</td><td>O(1/T + 1/√mKT)</td><td>O(1/ε + 1/mKε2)</td></tr><tr><td>This paper5</td><td>NC</td><td>✓</td><td>O(1/T + 1/√mKT)</td><td>O(1/ε + 1/mKε2)</td></tr></table>
|
| 41 |
+
|
| 42 |
+
1 Full gradients are used for each worker.
|
| 43 |
+
2 Local momentum is used at each worker.
|
| 44 |
+
3 A FedAvg algorithm with two-sided learning rates. $M^2 = \mathcal{O}(1) + \mathcal{O}(KS(1 - \frac{S}{m}))$ . $S = m$ ( $S = n$ ) for full (partial) worker participation.
|
| 45 |
+
4 The SCAFFOLD algorithm in Karimireddy et al. (2019) for non-convex case.
|
| 46 |
+
5 The convergence rate becomes $\mathcal{O}\left(\frac{1}{T} +\frac{\sqrt{K}}{\sqrt{nT}}\right)$ under partial worker participation.
|
| 47 |
+
$^{6}$ Shorthand notation for references: Stich1 := Stich (2018), Yu1 := Yu et al. (2019b), Wang := Wang & Joshi (2018), Stich2 := Stich & Karimireddy (2019); Khaled1 := Khaled et al. (2019b), Yu2 := Yu et al. (2019a), Li := Li et al. (2019b), and Karimireddy := Karimireddy et al. (2019).
|
| 48 |
+
7 Shorthand notation for convexity: SC: Strongly Convex, C: Convex, and NC: Non-Convex.
|
| 49 |
+
|
| 50 |
+
Notation. In this paper, we let $m$ be the total number of workers and $S_{t}$ be the set of active workers for the $t$ -th communication round with size $|S_{t}| = n$ for some $n \in (0, m]$ .<sup>3</sup> We use $K$ to denote the number of local steps per communication round at each worker. We let $T$ be the number of total communication rounds. In addition, we use boldface to denote matrices/vectors. We let $\left[\cdot\right]_{t,k}^{i}$ represent the parameter of $k$ -th local step in the $i$ -th worker after the $t$ -th communication. We use $\|\cdot\|_2$ to denote the $\ell^2$ -norm. For a natural number $m$ , we use $[m]$ to represent the set $\{1, \dots, m\}$ .
|
| 51 |
+
|
| 52 |
+
The rest of the paper is organized as follows. In Section 2, we review the literature to put our work in comparative perspectives. Section 3 presents the convergence analysis for our proposed algorithm. Section 4 discusses the implication of the convergence rate analysis. Section 5 presents numerical results and Section 6 concludes this paper. Due to space limitation, the details of all proofs and some experiments are provided in the supplementary material.
|
| 53 |
+
|
| 54 |
+
# 2 RELATED WORK
|
| 55 |
+
|
| 56 |
+
The federated averaging (FedAvg) algorithm was first proposed by McMahan et al. (2016) for FL as a heuristic to improve communication efficiency and data privacy. Since then, this work has sparked many follow-ups that focus on FL with i.i.d. datasets and full worker participation (also known as LocalSGD (Stich, 2018; Yu et al., 2019b; Wang & Joshi, 2018; Stich & Karimireddy, 2019; Lin et al., 2018; Khaled et al., 2019a; Zhou & Cong, 2017)). Under these two assumptions, most of the theoretical works can achieve a linear speedup for convergence, i.e., $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)$ for a sufficiently large $T$ , matching the rate of the parallel SGD. In addition, LocalSGD is empirically shown to be communication-efficient and enjoys better generalization performance (Lin et al., 2018). For a comprehensive introduction to FL, we refer readers to Li et al. (2019a) and Kairouz et al. (2019).
|
| 57 |
+
|
| 58 |
+
Algorithm 1 A Generalized FedAvg Algorithm with Two-Sided Learning Rates.
|
| 59 |
+
Initialize $\mathbf{x}_0$
|
| 60 |
+
for $t = 0,\dots ,T - 1$ do The server samples a subset $S_{t}$ of workers with $|S_t| = n$ for each worker $i\in S_{t}$ in parallel do $\mathbf{x}_{t,0}^{i} = \mathbf{x}_{t}$ for $k = 0,\dots ,K - 1$ do Compute an unbiased estimate $\mathbf{g}_{t,k}^{i} = \nabla F_{i}(\mathbf{x}_{t,k}^{i},\xi_{t,k}^{i})$ of $\nabla F_{i}(\mathbf{x}_{t,k}^{i})$ Local worker update: $\mathbf{x}_{t,k + 1}^{i} = \mathbf{x}_{t,k}^{i} - \eta_{L}\mathbf{g}_{t,k}^{i}$ end for Let $\Delta_t^i = \mathbf{x}_{t,K}^i -\mathbf{x}_{t,0}^i = -\eta_L\sum_{k = 0}^{K - 1}\mathbf{g}_{t,k}^i$ Send $\Delta_t^i$ to the server.
|
| 61 |
+
end for At Server: Receive $\Delta_t^i,i\in S_t$ Let $\Delta_t = \frac{1}{|S_t|}\sum_{i\in S_t}\Delta_t^i$ Server Update: $\mathbf{x}_{t + 1} = \mathbf{x}_t + \eta \Delta_t$ Broadcast $\mathbf{x}_{t + 1}$ to workers.
|
| 62 |
+
end for
|
| 63 |
+
|
| 64 |
+
For non-i.i.d. datasets, many works (Sattler et al., 2019; Zhao et al., 2018; Li et al., 2018; Wang et al., 2019a; Karimireddy et al., 2019; Huang et al., 2018; Jeong et al., 2018) heuristically demonstrated the performance of FedAvg and its variants. On convergence rate with full worker participation, many works (Stich et al., 2018; Yu et al., 2019a; Wang & Joshi, 2018; Karimireddy et al., 2019; Reddi et al., 2020) can achieve linear speedup, but their convergence rate bounds could be improved as shown in this paper. On convergence rate with partial worker participation, Li et al. (2019b) showed that the original FedAvg can achieve $\mathcal{O}(K / T)$ for strongly convex functions, which suggests that local SGD steps slow down the convergence in the original FedAvg. Karimireddy et al. (2019) analyzed a generalized FedAvg with two-sided learning rates under strongly convex, convex and non-convex cases. However, as shown in Table 1, none of them indicates that linear speedup is achievable with non-i.i.d. datasets under partial worker participation. Note that the SCAFFOLD algorithm (Karimireddy et al., 2019) can achieve linear speedup but extra variance reduction operations are required, which lead to higher communication costs and implementation complexity. In this paper, we show that this linear speedup can be achieved without any extra requirements. For more detailed comparisons and other algorithmic variants in FL and decentralized settings, we refer readers to Kairouz et al. (2019).
|
| 65 |
+
|
| 66 |
+
# 3 LINEAR SPEEDUP OF THE GENERALIZED FEDAVG WITH TWO-SIDED LEARNING RATES FOR NON-IID DATASETS
|
| 67 |
+
|
| 68 |
+
In this paper, we consider a FedAvg algorithm with two-sided learning rates as shown in Algorithm 1, which is generalized from previous works (Karimireddy et al., 2019; Reddi et al., 2020). Here, workers perform multiple SGD steps using a worker optimizer to minimize the local loss on its own dataset, while the server aggregates and updates the global model using another gradient-based server optimizer based on the returned parameters. Specifically, between two consecutive communication rounds, each worker performs $K$ SGD steps with the worker's local learning rate $\eta_{L}$ . We assume an unbiased estimator in each step, which is denoted by $\mathbf{g}_{t,k}^{i} = \nabla F_{i}(\mathbf{x}_{t,k}^{i},\xi_{t,k}^{i})$ , where $\xi_{t,k}^{i}$ is a random local data sample for $k$ -th steps after $t$ -th communication round at worker $i$ . Then, each worker sends the accumulative parameter difference $\Delta_t^i$ to the server. On the server side, the server aggregates all available $\Delta_t^i$ -values and updates the model parameters with a global learning rate $\eta$ . The FedAvg algorithm with two-sided learning rates provides a natural way to decouple the learning of workers and server, thus utilizing different learning rate schedules for workers and the server. The original FedAvg can be viewed as a special case of this framework with server-side learning rate being one.
|
| 69 |
+
|
| 70 |
+
In what follows, we show that a linear speedup for convergence is achievable by the generalized FedAvg for non-convex functions on non-i.i.d. datasets. We first state our assumptions as follows.
|
| 71 |
+
|
| 72 |
+
Assumption 1. (L-Lipschitz Continuous Gradient) There exists a constant $L > 0$ , such that $\| \nabla F_i(\mathbf{x}) - \nabla F_i(\mathbf{y}) \| \leq L \| \mathbf{x} - \mathbf{y} \|, \forall \mathbf{x}, \mathbf{y} \in \mathbb{R}^d, \text{and } i \in [m]$ .
|
| 73 |
+
|
| 74 |
+
Assumption 2. (Unbiased Local Gradient Estimator) Let $\xi_t^i$ be a random local data sample in the $t$ -th step at the $i$ -th worker. The local gradient estimator is unbiased, i.e., $\mathbb{E}[\nabla F_i(\mathbf{x}_t,\xi_t^i)] = \nabla F_i(\mathbf{x}_t)$ , $\forall i\in [m]$ , where the expectation is over all local datasets samples.
|
| 75 |
+
|
| 76 |
+
Assumption 3. (Bounded Local and Global Variance) There exist two constants $\sigma_L > 0$ and $\sigma_G > 0$ , such that the variance of each local gradient estimator is bounded by $\mathbb{E}[\| \nabla F_i(\mathbf{x}_t,\xi_t^i) - \nabla F_i(\mathbf{x}_t)\|^2 ]\leq \sigma_L^2,\forall i\in [m]$ , and the global variability of the local gradient of the cost function is bounded by $\| \nabla F_i(\mathbf{x}_t) - \nabla f(\mathbf{x}_t)\| ^2\leq \sigma_G^2,\forall i\in [m],\forall t.$
|
| 77 |
+
|
| 78 |
+
The first two assumptions are standard in non-convex optimization (Ghadimi & Lan, 2013; Bottou et al., 2018). For Assumption 3, the bounded local variance is also a standard assumption. We use a universal bound $\sigma_G$ to quantify the heterogeneity of the non-i.i.d. datasets among different workers. In particular, $\sigma_G = 0$ corresponds to i.i.d. datasets. This assumption is also used in other works for FL under non-i.i.d. datasets (Reddi et al., 2020; Yu et al., 2019b; Wang et al., 2019b) as well as in decentralized optimization (Kairouz et al., 2019). It is worth noting that we do not require a bounded gradient assumption, which is often assumed in FL optimization analysis.
|
| 79 |
+
|
| 80 |
+
# 3.1 CONVERGENCE ANALYSIS FOR FULL WORKER PARTICIPATION
|
| 81 |
+
|
| 82 |
+
In this subsection, we first analyze the convergence rate of the generalized FedAvg with two-sided learning rates under full worker participation, for which we have the following result:
|
| 83 |
+
|
| 84 |
+
Theorem 1. Let constant local and global learning rates $\eta_L$ and $\eta$ be chosen such that $\eta_L \leq \frac{1}{8KL}$ and $\eta \eta_L \leq \frac{1}{KL}$ . Under Assumptions 1-3 and with full worker participation, the sequence of outputs $\{\mathbf{x}_t\}$ generated by Algorithm 1 satisfies:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\min _ {t \in [ T ]} \mathbb {E} [ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} ] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $\Phi \triangleq \frac{1}{c} \left[ \frac{L \eta \eta_L}{2m} \sigma_L^2 + \frac{5K \eta_L^2 L^2}{2} (\sigma_L^2 + 6K \sigma_G^2) \right]$ , $c$ is a constant, $f_0 \triangleq f(\mathbf{x}_0)$ , $f_* \triangleq f(\mathbf{x}_*)$ and the expectation is over the local dataset samples among workers.
|
| 91 |
+
|
| 92 |
+
Remark 1. The convergence bound contains two parts: a vanishing term $\frac{f_0 - f_*}{c\eta\eta_LKT}$ as $T$ increases and a constant term $\Phi$ whose size depends on the problem instance parameters and is independent of $T$ . The vanishing term's decay rate matches that of the typical SGD methods.
|
| 93 |
+
|
| 94 |
+
Remark 2. The first part of $\Phi$ (i.e., $\frac{L\eta\eta_{L}}{2m}\sigma_{L}^{2}$ ) is due to the local stochastic gradients at each worker, which shrinks at rate $\frac{1}{m}$ as $m$ increases. The cumulative variance of the $K$ local steps contributes to the second term in $\Phi$ (i.e., $\frac{5K\eta_L^2L^2}{2} (\sigma_L^2 +6K\sigma_G^2)$ ), which is independent of $m$ and largely affected by the data heterogeneity. To make the second part small, an inverse relationship between the local learning rate and local steps should be satisfied, i.e., $\eta_{L} = \mathcal{O}\left(\frac{1}{K}\right)$ . Specifically, note that the global and local variances are quadratically and linearly amplified by $K$ . This requires a sufficiently small $\eta_{L}$ to offset the variance between two successive communication rounds to make the second term in $\Phi$ small. This is consistent with the observation in strongly convex FL that a decaying learning rate is needed for FL to converge under non-i.i.d. datasets even if full gradients are used at each worker (Li et al., 2019b). However, we note that our explicit inverse relationship between $\eta_{L}$ and $K$ in the above is new. Intuitively, the $K$ local steps with a sufficiently small $\eta_{L}$ can be viewed as one SGD step with a large learning rate.
|
| 95 |
+
|
| 96 |
+
With Theorem 1, we immediately have the following convergence rate for the generalized FedAvg algorithm with a proper choice of two-sided learning rates:
|
| 97 |
+
|
| 98 |
+
Corollary 1. Let $\eta_{L} = \frac{1}{\sqrt{T}KL}$ and $\eta = \sqrt{Km}$ . The convergence rate of the generalized FedAvg algorithm under full worker participation is $\min_{t\in [T]}\mathbb{E}[\| \nabla f(\mathbf{x}_t)\| _2^2 ] = \mathcal{O}\bigg(\frac{1}{\sqrt{mKT}} +\frac{1}{T}\bigg)$ .
|
| 99 |
+
|
| 100 |
+
Remark 3. The generalized FedAvg algorithm with two-sided learning rates can achieve a linear speedup for non-i.i.d. datasets, i.e., a $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)$ convergence rate as long as $T \geq mK$ . Although many works have achieved this convergence rate asymptotically, we improve the maximum number
|
| 101 |
+
|
| 102 |
+
of local steps $K$ to $T / m$ , which is significantly better than the state-of-the-art bounds such as $T^{1/3} / m$ shown in (Karimireddy et al., 2019; Yu et al., 2019a; Kairouz et al., 2019). Note that a larger number of local steps implies relatively fewer communication rounds, thus less communication overhead. See also the communication complexity comparison in Table 1. For example, when $T = 10^6$ and $m = 100$ (as used in (Kairouz et al., 2019)), the number of local steps in our algorithm can be as large as $K \leq T / m = 10^4$ . However, $K \leq \frac{T^{1/3}}{m} = 1$ means that no extra local steps can be taken to reduce communication costs.
|
| 103 |
+
|
| 104 |
+
Remark 4. When degenerated to the i.i.d. case $(\sigma_G = 0)$ , the convergence rate becomes $\mathcal{O}\left(\frac{1}{TK} + \frac{1}{\sqrt{mKT}}\right)$ , which has a better first term in the bound compared with previous work as shown in Table 1.
|
| 105 |
+
|
| 106 |
+
# 3.2 CONVERGENCE ANALYSIS FOR PARTIAL WORKER PARTICIPATION
|
| 107 |
+
|
| 108 |
+
Partial worker participation in each communication round may be more practical than full worker participation due to many physical limitations of FL in practice (e.g., excessive delays because of too many devices to poll, malfunctioning devices, etc.). Partial worker participation can also accelerate the training by neglecting stragglers. We consider two sampling strategies proposed by Li et al. (2018) and Li et al. (2019b). Let $S_{t}$ be the participating worker index set at communication round $t$ with $|S_{t}| = n$ , $\forall t$ , for some $n \in (0,m]$ . $S_{t}$ is randomly and independently selected either with replacement (Strategy 1) or without replacement (Strategy 2) sequentially according to the sampling probabilities $p_i, \forall i \in [m]$ . For each member in $S_{t}$ , we pick a worker from the entire set $[m]$ uniformly at random with probability $p_i = \frac{1}{m}, \forall i \in [m]$ . That is, the selection likelihood for any one worker $i \in S_{t}$ is $p = \frac{n}{m}$ . Then we have the following results:
|
| 109 |
+
|
| 110 |
+
Theorem 2. Under Assumptions 1-3 with partial worker participation, the sequence of outputs $\{\mathbf{x}_t\}$ generated by Algorithm 1 with constant learning rates $\eta$ and $\eta_L$ satisfies:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\min _ {t \in [ T ]} \mathbb {E} \left[ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} \right] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $f_0 = f(\mathbf{x}_0)$ , $f_* = f(\mathbf{x}_*)$ , and the expectation is over the local dataset samples among workers.
|
| 117 |
+
|
| 118 |
+
For sampling Strategy 1, let $\eta$ and $\eta_L$ be chosen such that $\eta_L \leq \frac{1}{8LK}$ , $\eta_{L}KL < \frac{n - 1}{n}$ and $30K^2\eta_L^2 L^2 - \frac{L\eta\eta_{L}}{n} (90K^3 L^2\eta_L^2 + 3K) < 1$ . It then holds that:
|
| 119 |
+
|
| 120 |
+
$$
|
| 121 |
+
\Phi \triangleq \frac {1}{c} \left[ \frac {L \eta \eta_ {L}}{2 n} \sigma_ {L} ^ {2} + \frac {3 L K \eta \eta_ {L}}{2 n} \sigma_ {G} ^ {2} + (\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + \frac {1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3}}{2 n}) (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \right].
|
| 122 |
+
$$
|
| 123 |
+
|
| 124 |
+
For sampling Strategy 2, let $\eta$ and $\eta_L$ be chosen such that $\eta_L \leq \frac{1}{8LK}$ , $\eta_{L}KL \leq \frac{n(m - 1)}{m(n - 1)}$ and $10K^2\eta_L^2 L^2 - L\eta\eta_L\frac{m - n}{n(m - 1)}(90K^3\eta_L^2 L^2 + 3K) < 1$ . It then holds that:
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\Phi \triangleq \frac {1}{c} \left[ \frac {L \eta \eta_ {L}}{2 n} \sigma_ {L} ^ {2} + 3 L K \eta \eta_ {L} \frac {m - n}{2 n (m - 1)} \sigma_ {G} ^ {2} + \left(\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - n}{2 n (m - 1)}\right) (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \right].
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
From Theorem 2, we immediately have the following convergence rate for the generalized FedAvg algorithm with a proper choice of two-sided learning rates:
|
| 131 |
+
|
| 132 |
+
Corollary 2. Let $\eta_{L} = \frac{1}{\sqrt{T}KL}$ and $\eta = \sqrt{Kn}$ . The convergence rate of the generalized FedAvg algorithm under partial worker participation with both sampling strategies is:
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\min _ {t \in [ T ]} \mathbb {E} \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} \leq \mathcal {O} \left(\frac {\sqrt {K}}{\sqrt {n T}} + \frac {1}{T}\right).
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
Remark 5. The convergence rate bound for partial worker participation has the same structure but with a larger variance term. This implies that the partial worker participation through the uniform sampling does not result in fundamental changes in convergence (in order sense) except for an amplified variance due to fewer workers participating and random sampling. The intuition is that the uniform sampling (with/without replacement) for worker selection yields a good approximation of the entire worker distribution in expectation, which reduces the risk of distribution deviation due to the partial worker participation. As shown in Section 5, the distribution deviation due to fewer worker participation could render the training unstable, especially in highly non-i.i.d. cases.
|
| 139 |
+
|
| 140 |
+
Remark 6. The generalized FedAvg with partial worker participation under non-i.i.d. datasets can still achieve a linear speedup $\mathcal{O}\left(\frac{\sqrt{K}}{\sqrt{nT}}\right)$ with proper learning rate settings as shown in Corollary 2. In addition, when degenerated to i.i.d. case ( $\sigma_G = 0$ ), the convergence rate becomes $\mathcal{O}\left(\frac{1}{TK} + \frac{1}{\sqrt{nKT}}\right)$ .
|
| 141 |
+
|
| 142 |
+
Remark 7. Here, we let $|S_t| = n$ only for ease of presentation and better readability. We note that this is not a restrictive condition. We can show that $|S_t| = n$ can be relaxed to $|S_t| \geq n$ , $\forall t \in [T]$ and the same convergence rate still holds. In fact, our full proof in Appendix A.2 is for $|S_t| \geq n$ .
|
| 143 |
+
|
| 144 |
+
# 4 DISCUSSION
|
| 145 |
+
|
| 146 |
+
In light of above results, in what follows, we discuss several insights from the convergence analysis:
|
| 147 |
+
|
| 148 |
+
Convergence Rate: We show that the generalized FedAvg algorithm with two-sided learning rates can achieve a linear speedup, i.e., an $\mathcal{O}\left(\frac{1}{\sqrt{mKT}}\right)$ convergence rate with a proper choice of hyperparameters. Thus, it works well in large FL systems, where massive parallelism can be leveraged to accelerate training. The key challenge in convergence analysis stems from the different local loss functions (also called "model drift" in the literature) among workers due to the non-i.i.d. datasets and local steps. As shown above, we obtain a convergence bound for the generalized FedAvg method containing a vanishing term and a constant term (the constant term is similar to that of SGD). In contrast, the constant term in SGD is only due to the local variance. Note that, similar to SGD, the iterations do not diminish the constant term. The local variance $\sigma_L^2$ (randomness of stochastic gradients), global variability $\sigma_G^2$ (non-i.i.d. datasets), and the number of local steps $K$ (amplification factor) all contribute to the constant term, but the total global variability in $K$ local steps dominates the term. When the local learning rate $\eta_L$ is set to an inverse relationship with respect to the number of local steps $K$ , the constant term is controllable. An intuitive explanation is that the $K$ small local steps can be approximately viewed as one large step in conventional SGD. So this speedup and the more allowed local steps can be largely attributed to the two-sided learning rates setting.
|
| 149 |
+
|
| 150 |
+
Number of Local Steps: Besides the result that the maximum number of local steps is improved to $K \leq T / m$ , we also show that the local steps could help the convergence with the proper hyperparameter choices, which supports previous numerical results (McMahan et al., 2016; Stich, 2018; Lin et al., 2018) and is verified in different models with datasets of different non-i.i.d. degrees in Section 5. However, there are other results showing that the local steps slow down the convergence (Li et al., 2019b). We believe that whether local steps help or hurt the convergence in FL is worth further investigation.
|
| 151 |
+
|
| 152 |
+
Number of Workers: We show that the convergence rate improves substantially as the number of workers in each communication round increases. This is consistent with the results for i.i.d. cases in Stich (2018). For i.i.d. datasets, more workers means more data samples and thus less variance and better performance. For non-i.i.d. datasets, having more workers implies that the distribution of the sampled workers is a better approximation for the distribution of all workers. This is also empirically observed in Section 5. On the other hand, the sampling strategy plays an important role in non-i.i.d. case as well. Here, we adopt the uniform sampling (with/without replacement) to enlist workers to participate in FL. Intuitively, the distribution of the sampled workers' collective datasets under uniform sampling yields a good approximation of the overall data distribution in expectation.
|
| 153 |
+
|
| 154 |
+
Note that, in this paper, we assume that every worker is available to participate once being enlisted. However, this may not always be feasible. In practice, the workers need to be in certain states in order to be able to participate in FL (e.g., in charging or idle states, etc. (Eichner et al., 2019)). Therefore, care must be taken in sampling and enlisting workers in practice. We believe that the joint design of sampling schemes and the generalized FedAvg algorithm will have a significant impact on the convergence, which needs further investigations.
|
| 155 |
+
|
| 156 |
+
# 5 NUMERICAL RESULTS
|
| 157 |
+
|
| 158 |
+
We perform extensive experiments to verify our theoretical results. We use three models: logistic regression (LR), a fully-connected neural network with two hidden layers (2NN) and a convolution neural network (CNN) with the non-i.i.d. version of MNIST (LeCun et al., 1998) and one ResNet model with CIFAR-10 (Krizhevsky et al., 2009). Due to space limitation, we relegate some experimental results in the supplementary material.
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
(a) Impact of non-i.i.d. datasets.
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
(b) Impact of worker number.
|
| 171 |
+
|
| 172 |
+

|
| 173 |
+
(c) Impact of local steps
|
| 174 |
+
Figure 1: Training loss (top) and test accuracy (bottom) for the 2NN model with hyper-parameters setting: local learning rate 0.1, global learning rate 1.0: (a) worker number 100, local steps 5 epochs; (b) local steps 5 epochs; (c) 5 digits in each worker's dataset.
|
| 175 |
+
|
| 176 |
+
In this section, we elaborate the results under non-i.i.d. MNIST datasets for the 2NN. We distribute the MNIST dataset among $m = 100$ workers randomly and evenly in a digit-based manner such that the local dataset for each worker contains only a certain class of digits. The number of digits in each worker's dataset represents the non-i.i.d. degree. For digits_10, each worker has training/testing samples with ten digits from 0 to 9, which is essentially an i.i.d. case. For digits_1, each worker has samples only associated with one digit, which leads to highly non-i.i.d. datasets among workers. For partial worker participation, we set the number of workers $n = 10$ in each communication round.
|
| 177 |
+
|
| 178 |
+
Impact of non-i.i.d. datasets: As shown in Figure 1(a), for the 2NN model with full worker participation, the top-row figures are for training loss versus communication round and the bottom row are for test accuracy versus communication round. We can see that the generalized FedAvg algorithm converges under non-i.i.d. datasets with a proper learning rate choice in both cases. For five digits (digits_5) in each worker's dataset with full (partial) worker participation in Figure 1(a), the generalized FedAvg algorithm achieves a convergence speed comparable to that of the i.i.d. case (digits_10). Another key observation is that non-i.i.d. datasets slow down the convergence under the same learning rate settings for both cases. The higher the non-i.i.d. degree, the slower the convergence speed. As the non-i.i.d. degree increases (from case digits_10 to case digits_1), it is obvious that the training loss is increasing and test accuracy is decreasing. This trend is more obvious from the zigzagging curves for partial worker participation. These two observations can also be verified for other models as shown in the supplementary material, which confirms our theoretical analysis.
|
| 179 |
+
|
| 180 |
+
Impact of worker number: As shown in Figure 1(b), we compare the training loss and test accuracy between full worker participation $n = 100$ and partial worker participation $n = 10$ with the same hyper-parameters. Compared with full worker participation, partial worker participation introduces another source of randomness, which leads to zigzagging convergence curves and slower convergence. This problem is more prominent for highly non-i.i.d. datasets. For full worker participation, it can neutralize the system heterogeneity in each communication round. However, it might not be able to neutralize the gaps among different workers for partial worker participation. That is, the datasets' distribution does not approximate the overall distribution well. Specifically, it is not unlikely that the digits in these datasets among all active workers are only a proper subset of the total 10 digits in the original MNIST dataset, especially with highly non-i.i.d. datasets. This trend is also obvious for complex models and complicated datasets as shown in the supplementary material. The sampling strategy here is random sampling with equal probability without replacement. In practice, however, the actual sampling of the workers in FL could be more complex, which requires further investigations.
|
| 181 |
+
|
| 182 |
+
Impact of local steps: One open question of FL is that whether the local steps help the convergence or not. In Figure 1(c), we show that the local steps could help the convergence for both full and partial worker participation. These results verify our theoretical analysis. However, Li et al. (2019b) showed that the local steps may hurt the convergence, which was demonstrated under unbalanced non-i.i.d. MNIST datasets. We believe that this may be due to the combined effect of unbalanced datasets and local steps rather than just the use of local steps only.
|
| 183 |
+
|
| 184 |
+
Table 2: Comparison with SCAFFOLD.
|
| 185 |
+
|
| 186 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">IID or Non-IID</td><td rowspan="2">Worker selected</td><td rowspan="2">Model</td><td colspan="3">SCAFFOLD</td><td colspan="3">This paper</td></tr><tr><td># of Round</td><td>Communication cost (MB)</td><td>Wall-clock time (s)</td><td># of Round</td><td>Communication cost (MB)</td><td>Wall-clock time (s)</td></tr><tr><td rowspan="12">MNIST</td><td rowspan="6">IID</td><td rowspan="3">n = 10</td><td>Logistic</td><td>3</td><td>0.36</td><td>0.32</td><td>3</td><td>0.18</td><td>0.22</td></tr><tr><td>2NN</td><td>3</td><td>9.12</td><td>0.88</td><td>3</td><td>4.56</td><td>0.56</td></tr><tr><td>CNN</td><td>3</td><td>26.64</td><td>2.23</td><td>3</td><td>13.32</td><td>1.57</td></tr><tr><td rowspan="3">n = 100</td><td>Logistic</td><td>5</td><td>0.60</td><td>0.53</td><td>5</td><td>0.30</td><td>0.42</td></tr><tr><td>2NN</td><td>5</td><td>15.20</td><td>1.51</td><td>8</td><td>12.16</td><td>1.49</td></tr><tr><td>CNN</td><td>1</td><td>8.88</td><td>0.79</td><td>1</td><td>4.44</td><td>0.50</td></tr><tr><td rowspan="6">Non-IID</td><td rowspan="3">n = 10</td><td>Logistic</td><td>14</td><td>1.68</td><td>1.48</td><td>14</td><td>0.84</td><td>1.16</td></tr><tr><td>2NN</td><td>14</td><td>42.55</td><td>4.23</td><td>14</td><td>21.28</td><td>2.46</td></tr><tr><td>CNN</td><td>14</td><td>124.34</td><td>11.12</td><td>10</td><td>44.41</td><td>4.92</td></tr><tr><td rowspan="3">n = 100</td><td>Logistic</td><td>7</td><td>0.84</td><td>0.72</td><td>11</td><td>0.66</td><td>0.91</td></tr><tr><td>2NN</td><td>7</td><td>21.28</td><td>2.11</td><td>17</td><td>25.84</td><td>3.16</td></tr><tr><td>CNN</td><td>17</td><td>150.98</td><td>13.50</td><td>7</td><td>31.08</td><td>3.51</td></tr><tr><td rowspan="2">CIFAR-10</td><td>IID</td><td>n = 10</td><td>Resnet18</td><td>56</td><td>9548.07</td><td>583.24</td><td>44</td><td>3751.03</td><td>256.63</td></tr><tr><td>Non-IID</td><td>n = 
10</td><td>Resnet18</td><td>52</td><td>8866.06</td><td>539.50</td><td>61</td><td>5200.29</td><td>358.22</td></tr></table>
|
| 187 |
+
|
| 188 |
+
Bandwidth $= 20\mathrm{MB / s}$
|
| 189 |
+
|
| 190 |
+
Comparison with SCAFFOLD: Lastly, we compare with the SCAFFOLD algorithm (Karimireddy et al., 2019) since it also achieves the same linear speedup effect under non-i.i.d. datasets. We compare communication rounds, total communication load, and estimated wall-clock time under the same settings to achieve certain test accuracy, and the results are reported in Table 2. The non-i.i.d. dataset is digits_2 and the i.i.d. dataset is digits_10. The learning rates are $\eta_L = 0.1$ , $\eta = 1.0$ , and number of local steps $K$ is 5 epochs. We set the target accuracy $\epsilon = 95\%$ for MNIST and $\epsilon = 75\%$ for CIFAR-10. Note that the total training time contains two parts: i) the computation time for training the local model at each worker and ii) the communication time for information exchanges between the workers and the server. We assume the bandwidth $20~\mathrm{MB / s}$ for both uplink and downlink connections. For MNIST datasets, we can see that our algorithm is similar to or outperforms SCAFFOLD. This is because the numbers of communication rounds of both algorithms are relatively small for such simple tasks. For non-i.i.d. CIFAR-10, the SCAFFOLD algorithm takes slightly fewer number of communication rounds than our FedAvg algorithm to achieve $\epsilon = 75\%$ thanks to its variance reduction. However, it takes more than 1.5 times of communication cost and wall-clock time compared to those of our FedAvg algorithm. Due to space limitation, we relegate the results of time proportions for computation and communication to Appendix B (see Figure 7).
|
| 191 |
+
|
| 192 |
+
# 6 CONCLUSIONS AND FUTURE WORK
|
| 193 |
+
|
| 194 |
+
In this paper, we analyzed the convergence of a generalized FedAvg algorithm with two-sided learning rates on non-i.i.d. datasets for general non-convex optimization. We proved that the generalized FedAvg algorithm achieves a linear speedup for convergence under full and partial worker participation. We showed that the local steps in FL could help the convergence and we improve the maximum number of local steps to $T / m$ . While our work sheds light on theoretical understanding of FL, it also opens the doors to many new interesting questions in FL, such as how to sample optimally in partial worker participation, and how to deal with active participant sets that are both time-varying and size-varying across communication rounds. We hope that the insights and proof techniques in this paper can pave the way for many new research directions in the aforementioned areas.
|
| 195 |
+
|
| 196 |
+
# ACKNOWLEDGEMENTS
|
| 197 |
+
|
| 198 |
+
This work is supported in part by NSF grants CAREER CNS-1943226, CIF-2110252, ECCS-1818791, CCF-1934884, ONR grant ONR N00014-17-1-2417, and a Google Faculty Research Award.
|
| 199 |
+
|
| 200 |
+
# REFERENCES
|
| 201 |
+
|
| 202 |
+
Léon Bottou, Frank E Curtis, and Jorge Nocedal. Optimization methods for large-scale machine learning. Siam Review, 60(2):223-311, 2018.
|
| 203 |
+
Hubert Eichner, Tomer Koren, H Brendan McMahan, Nathan Srebro, and Kunal Talwar. Semi-cyclic stochastic gradient descent. arXiv preprint arXiv:1904.10120, 2019.
|
| 204 |
+
Saeed Ghadimi and Guanghui Lan. Stochastic first-and zeroth-order methods for nonconvex stochastic programming. SIAM Journal on Optimization, 23(4):2341-2368, 2013.
|
| 205 |
+
Tzu-Ming Harry Hsu, Hang Qi, and Matthew Brown. Measuring the effects of non-identical data distribution for federated visual classification. arXiv preprint arXiv:1909.06335, 2019.
|
| 206 |
+
Li Huang, Yifeng Yin, Zeng Fu, Shifa Zhang, Hao Deng, and Dianbo Liu. Loadaboost: Loss-based adaboost federated machine learning on medical data. arXiv preprint arXiv:1811.12629, 2018.
|
| 207 |
+
Eunjeong Jeong, Seungeun Oh, Hyesung Kim, Jihong Park, Mehdi Bennis, and Seong-Lyun Kim. Communication-efficient on-device machine learning: Federated distillation and augmentation under non-iid private data. arXiv preprint arXiv:1811.11479, 2018.
|
| 208 |
+
Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Keith Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. arXiv preprint arXiv:1912.04977, 2019.
|
| 209 |
+
Sai Praneeth Karimireddy, Satyen Kale, Mehryar Mohri, Sashank J Reddi, Sebastian U Stich, and Ananda Theertha Suresh. Scaffold: Stochastic controlled averaging for on-device federated learning. arXiv preprint arXiv:1910.06378, 2019.
|
| 210 |
+
Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. Better communication complexity for local sgd. arXiv preprint arXiv:1909.04746, 2019a.
|
| 211 |
+
Ahmed Khaled, Konstantin Mishchenko, and Peter Richtárik. First analysis of local gd on heterogeneous data. arXiv preprint arXiv:1909.04715, 2019b.
|
| 212 |
+
Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
|
| 213 |
+
Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324, 1998.
|
| 214 |
+
Tian Li, Anit Kumar Sahu, Manzil Zaheer, Maziar Sanjabi, Ameet Talwalkar, and Virginia Smith. Federated optimization in heterogeneous networks. arXiv preprint arXiv:1812.06127, 2018.
|
| 215 |
+
Tian Li, Anit Kumar Sahu, Ameet Talwalkar, and Virginia Smith. Federated learning: Challenges, methods, and future directions. arXiv preprint arXiv:1908.07873, 2019a.
|
| 216 |
+
Xiang Li, Kaixuan Huang, Wenhao Yang, Shusen Wang, and Zhihua Zhang. On the convergence of fedavg on non-iid data. arXiv preprint arXiv:1907.02189, 2019b.
|
| 217 |
+
Tao Lin, Sebastian U Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use local sgd. arXiv preprint arXiv:1808.07217, 2018.
|
| 218 |
+
H Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, et al. Communication-efficient learning of deep networks from decentralized data. arXiv preprint arXiv:1602.05629, 2016.
|
| 219 |
+
Sashank Reddi, Zachary Charles, Manzil Zaheer, Zachary Garrett, Keith Rush, Jakub Konecny, Sanjiv Kumar, and H Brendan McMahan. Adaptive federated optimization. arXiv preprint arXiv:2003.00295, 2020.
|
| 220 |
+
Felix Sattler, Simon Wiedemann, Klaus-Robert Müller, and Wojciech Samek. Robust and communication-efficient federated learning from non-iid data. IEEE transactions on neural networks and learning systems, 2019.
|
| 221 |
+
Sebastian U Stich. Local sgd converges fast and communicates little. arXiv preprint arXiv:1805.09767, 2018.
|
| 222 |
+
|
| 223 |
+
Sebastian U Stich and Sai Praneeth Karimireddy. The error-feedback framework: Better rates for sgd with delayed gradients and compressed communication. arXiv preprint arXiv:1909.05350, 2019.
|
| 224 |
+
Sebastian U Stich, Jean-Baptiste Cordonnier, and Martin Jaggi. Sparsified sgd with memory. In Advances in Neural Information Processing Systems, pp. 4447-4458, 2018.
|
| 225 |
+
Jianyu Wang and Gauri Joshi. Cooperative sgd: A unified framework for the design and analysis of communication-efficient sgd algorithms. arXiv preprint arXiv:1808.07576, 2018.
|
| 226 |
+
Jianyu Wang, Vinayak Tantia, Nicolas Ballas, and Michael Rabbat. Slowmo: Improving communication-efficient distributed sgd with slow momentum. arXiv preprint arXiv:1910.00643, 2019a.
|
| 227 |
+
Shiqiang Wang, Tiffany Tuor, Theodoros Salonidis, Kin K Leung, Christian Makaya, Ting He, and Kevin Chan. Adaptive federated learning in resource constrained edge computing systems. IEEE Journal on Selected Areas in Communications, 37(6):1205-1221, 2019b.
|
| 228 |
+
Hao Yu, Rong Jin, and Sen Yang. On the linear speedup analysis of communication efficient momentum sgd for distributed non-convex optimization. arXiv preprint arXiv:1905.03817, 2019a.
|
| 229 |
+
Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted sgd with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019b.
|
| 230 |
+
Yue Zhao, Meng Li, Liangzhen Lai, Naveen Suda, Damon Civin, and Vikas Chandra. Federated learning with non-iid data. arXiv preprint arXiv:1806.00582, 2018.
|
| 231 |
+
Fan Zhou and Guojing Cong. On the convergence properties of a $k$ -step averaging stochastic gradient descent algorithm for nonconvex optimization. arXiv preprint arXiv:1708.01012, 2017.
|
| 232 |
+
|
| 233 |
+
# A APPENDIX I: PROOFS
|
| 234 |
+
|
| 235 |
+
In this section, we give the proofs in detail for full and partial worker participation in Section A.1 and Section A.2, respectively.
|
| 236 |
+
|
| 237 |
+
# A.1 PROOF OF THEOREM 1
|
| 238 |
+
|
| 239 |
+
Theorem 1. Let the constant local and global learning rates $\eta_L$ and $\eta$ be chosen such that $\eta_L \leq \frac{1}{8KL}$ and $\eta \eta_L \leq \frac{1}{KL}$ . Under Assumptions 1-3 and with full worker participation, the sequence of outputs $\{\mathbf{x}_k\}$ generated by Algorithm 1 satisfies:
|
| 240 |
+
|
| 241 |
+
$$
|
| 242 |
+
\min _ {t \in [ T ]} \mathbb {E} [ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} ] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 243 |
+
$$
|
| 244 |
+
|
| 245 |
+
where $\Phi \triangleq \frac{1}{c} \left[ \frac{L \eta \eta_L}{2m} \sigma_L^2 + \frac{5K \eta_L^2 L^2}{2} (\sigma_L^2 + 6K \sigma_G^2) \right]$ , $c$ is a constant, $f_0 \triangleq f(\mathbf{x}_0)$ , $f_* \triangleq f(\mathbf{x}_*)$ and the expectation is over the local dataset samples among workers.
|
| 246 |
+
|
| 247 |
+
Proof. For convenience, we define $\bar{\Delta}_t \triangleq \frac{1}{m} \sum_{i=1}^{m} \Delta_t^i$ . Under full device participation (i.e., $S_t = [m]$ ), it is clear that $\Delta_t = \frac{1}{m} \sum_{i=1}^{m} \Delta_t^i = \bar{\Delta}_t$ .
|
| 248 |
+
|
| 249 |
+
Due to the smoothness in Assumption 1, taking expectation of $f(\mathbf{x}_{t + 1})$ over the randomness at communication round $t$ , we have:
|
| 250 |
+
|
| 251 |
+
$$
|
| 252 |
+
\begin{array}{l} \mathbb {E} _ {t} \left[ f \left(\mathbf {x} _ {t + 1}\right) \right] \leq f \left(\mathbf {x} _ {t}\right) + \left\langle \nabla f \left(\mathbf {x} _ {t}\right), \mathbb {E} _ {t} \left[ \mathbf {x} _ {t + 1} - \mathbf {x} _ {t} \right] \right\rangle + \frac {L}{2} \mathbb {E} _ {t} \left[ \| \mathbf {x} _ {t + 1} - \mathbf {x} _ {t} \| ^ {2} \right] \\ = f \left(\mathbf {x} _ {t}\right) + \left\langle \nabla f \left(\mathbf {x} _ {t}\right), \mathbb {E} _ {t} \left[ \eta \bar {\Delta} _ {t} + \eta \eta_ {L} K \nabla f \left(\mathbf {x} _ {t}\right) - \eta \eta_ {L} K \nabla f \left(\mathbf {x} _ {t}\right) \right] \right\rangle + \frac {L}{2} \eta^ {2} \mathbb {E} _ {t} \left[ \| \bar {\Delta} _ {t} \| ^ {2} \right] \\ = f \left(\mathbf {x} _ {t}\right) - \eta \eta_ {L} K \| \nabla f \left(\mathbf {x} _ {t}\right) \| ^ {2} + \eta \underbrace {\left\langle \nabla f \left(\mathbf {x} _ {t}\right) , \mathbb {E} _ {t} [ \bar {\Delta} _ {t} + \eta_ {L} K \nabla f \left(\mathbf {x} _ {t}\right) ] \right\rangle} _ {A _ {1}} + \frac {L}{2} \eta^ {2} \underbrace {\mathbb {E} _ {t} [ \| \bar {\Delta} _ {t} \| ^ {2} ]} _ {A _ {2}}. \tag {1} \\ \end{array}
|
| 253 |
+
$$
|
| 254 |
+
|
| 255 |
+
Note that the term $A_{1}$ in (1) can be bounded as follows:
|
| 256 |
+
|
| 257 |
+
$$
|
| 258 |
+
\begin{array}{l} A _ {1} = \left\langle \nabla f (\mathbf {x} _ {t}), \mathbb {E} _ {t} [ \bar {\Delta} _ {t} + \eta_ {L} K \nabla f (\mathbf {x} _ {t}) ] \right\rangle \\ = \left\langle \nabla f (\mathbf {x} _ {t}), \mathbb {E} _ {t} \left[ - \frac {1}{m} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \eta_ {L} \mathbf {g} _ {t, k} ^ {i} + \eta_ {L} K \nabla f (x _ {t}) \right] \right\rangle \\ = \left\langle \nabla f (\mathbf {x} _ {t}), \mathbb {E} _ {t} \left[ - \frac {1}{m} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \eta_ {L} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) + \eta_ {L} K \frac {1}{m} \sum_ {i = 1} ^ {m} \nabla F _ {i} \left(\mathbf {x} _ {t}\right) \right] \right\rangle \\ = \left\langle \sqrt {\eta_ {L} K} \nabla f (\mathbf {x} _ {t}), - \frac {\sqrt {\eta_ {L}}}{m \sqrt {K}} \mathbb {E} _ {t} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \left(\nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) - \nabla F _ {i} (\mathbf {x} _ {t})\right) \right\rangle \\ \end{array}
|
| 259 |
+
$$
|
| 260 |
+
|
| 261 |
+
$$
|
| 262 |
+
\begin{array}{l} \stackrel {(a 1)} {=} \frac {\eta_ {L} K}{2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) - \nabla F _ {i} (\mathbf {x} _ {t}) \right\| ^ {2} - \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) \right\| ^ {2} \\ \stackrel {(a 2)} {\leq} \frac {\eta_ {L} K}{2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {\eta_ {L}}{2 m} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \mathbb {E} _ {t} \| \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) - \nabla F _ {i} (\mathbf {x} _ {t}) \| ^ {2} - \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) \right\| ^ {2} \\ \stackrel {(a 3)} {\leq} \frac {\eta_ {L} K}{2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {\eta_ {L} L ^ {2}}{2 m} \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \mathbb {E} _ {t} \| \mathbf {x} _ {t, k} ^ {i} - \mathbf {x} _ {t} \| ^ {2} - \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) \right\| ^ {2} \\ \stackrel {(a 4)} {\leq} \eta_ {L} K \left(\frac {1}{2} + 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}\right) \| \nabla f (x _ {t}) \| ^ {2} + \frac {5 K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) - \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) \right\| ^ {2}, \tag {2} \\ \end{array}
|
| 263 |
+
$$
|
| 264 |
+
|
| 265 |
+
where (a1) follows from the identity $\langle \mathbf{x},\mathbf{y}\rangle = \frac{1}{2} [\| \mathbf{x}\| ^2 +\| \mathbf{y}\| ^2 -\| \mathbf{x} - \mathbf{y}\| ^2 ]$ applied with $\mathbf{x} = \sqrt{\eta_LK}\nabla f(\mathbf{x}_t)$ and $\mathbf{y} = -\frac{\sqrt{\eta_L}}{m\sqrt{K}}\sum_{i = 1}^{m}\sum_{k = 0}^{K - 1}(\nabla F_i(\mathbf{x}_{t,k}^i) - \nabla F_i(\mathbf{x}_t))$; (a2) follows from the inequality $\mathbb{E}[\| x_1 + \dots +x_n\| ^2 ]\leq n\mathbb{E}[\| x_1\| ^2 +\dots +\| x_n\| ^2 ]$; (a3) is due to Assumption 1; and (a4) follows from Lemma 2.
|
| 266 |
+
|
| 267 |
+
The term $A_{2}$ in (1) can be bounded as:
|
| 268 |
+
|
| 269 |
+
$$
|
| 270 |
+
\begin{array}{l} A _ {2} = \mathbb {E} _ {t} [ \| \bar {\Delta} _ {t} \| ^ {2} ] \\ = \mathbb {E} _ {t} \left[ \left\| \frac {1}{m} \sum_ {i = 1} ^ {m} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ \leq \frac {1}{m ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ = \frac {\eta_ {L} ^ {2}}{m ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \mathbf {g} _ {t, k} ^ {i} \right\| ^ {2} \right] \\ \stackrel {(a 5)} {=} \frac {\eta_ {L} ^ {2}}{m ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \left(\mathbf {g} _ {t, k} ^ {i} - \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right)\right) \right\| ^ {2} \right] + \frac {\eta_ {L} ^ {2}}{m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) \right\| ^ {2} \\ \stackrel {(a 6)} {\leq} \frac {K \eta_ {L} ^ {2}}{m} \sigma_ {L} ^ {2} + \frac {\eta_ {L} ^ {2}}{m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) \right\| ^ {2}, \tag {3} \\ \end{array}
|
| 271 |
+
$$
|
| 272 |
+
|
| 273 |
+
where (a5) follows from the fact that $\mathbb{E}[\| \mathbf{x}\| ^2 ] = \mathbb{E}[\| \mathbf{x} - \mathbb{E}[\mathbf{x}]\| ^2 ] + \| \mathbb{E}[\mathbf{x}]\| ^2$ and (a6) is due to the bounded variance assumption in Assumption 3 and the fact that $\mathbb{E}[\| x_1 + \dots +x_n\| ^2 ] = \mathbb{E}[\| x_1\| ^2 +\dots +\| x_n\| ^2 ]$ if the $x_i$ 's are independent with zero mean, together with $\mathbb{E}[\mathbf{g}_{t,j}^{i}] = \nabla F_{i}(\mathbf{x}_{t,j}^{i})$.
|
| 274 |
+
|
| 275 |
+
Substituting the inequalities in (2) of $A_{1}$ and (3) of $A_{2}$ into inequality (1), we have:
|
| 276 |
+
|
| 277 |
+
$$
|
| 278 |
+
\mathbb {E} _ {t} \left[ f (\mathbf {x} _ {t + 1}) \right] \leq f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \eta \underbrace {\left\langle \nabla f (\mathbf {x} _ {t}) , \mathbb {E} _ {t} \left[ \bar {\Delta} _ {t} + \eta_ {L} K \nabla f (\mathbf {x} _ {t}) \right] \right\rangle} _ {A _ {1}} + \frac {L}{2} \eta^ {2} \underbrace {\mathbb {E} _ {t} \left[ \| \bar {\Delta} _ {t} \| ^ {2} \right]} _ {A _ {2}}
|
| 279 |
+
$$
|
| 280 |
+
|
| 281 |
+
$$
|
| 282 |
+
\begin{array}{l} \leq f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \left(\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}\right) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 m} \sigma_ {L} ^ {2} \\ + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) - \left(\frac {\eta \eta_ {L}}{2 K m ^ {2}} - \frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m ^ {2}}\right) \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) \right\| ^ {2} \\ \end{array}
|
| 283 |
+
$$
|
| 284 |
+
|
| 285 |
+
$$
|
| 286 |
+
\begin{array}{l} \stackrel {(a 7)} {\leq} f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \left(\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}\right) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 m} \sigma_ {L} ^ {2} + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ \stackrel {(a 8)} {\leq} f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 m} \sigma_ {L} ^ {2} + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}), \\ \end{array}
|
| 287 |
+
$$
|
| 288 |
+
|
| 289 |
+
where (a7) follows from $\left(\frac{\eta\eta_L}{2Km^2} -\frac{L\eta^2\eta_L^2}{2m^2}\right)\geq 0$ if $\eta \eta_{L}\leq \frac{1}{KL}$ , (a8) holds because there exists a constant $c > 0$ satisfying $\left(\frac{1}{2} -15K^{2}\eta_{L}^{2}L^{2}\right) > c > 0$ if $\eta_L < \frac{1}{\sqrt{30}KL}$ .
|
| 290 |
+
|
| 291 |
+
Rearranging and summing from $t = 0,\dots ,T - 1$ , we have:
|
| 292 |
+
|
| 293 |
+
$$
|
| 294 |
+
\sum_ {t = 0} ^ {T - 1} c \eta \eta_ {L} K \mathbb {E} [ \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} ] \leq f (\mathbf {x} _ {0}) - f (\mathbf {x} _ {T}) + T (\eta \eta_ {L} K) \left[ \frac {L \eta \eta_ {L}}{2 m} \sigma_ {L} ^ {2} + \frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \right]
|
| 295 |
+
$$
|
| 296 |
+
|
| 297 |
+
which implies,
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\min _ {t \in [ T ]} \mathbb {E} \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
where $\Phi = \frac{1}{c} \left[ \frac{L \eta \eta_L}{2m} \sigma_L^2 + \frac{5K \eta_L^2 L^2}{2} (\sigma_L^2 + 6K \sigma_G^2) \right]$ . This completes the proof.
|
| 304 |
+
|
| 305 |
+
# A.2 PROOF OF THEOREM 2
|
| 306 |
+
|
| 307 |
+
Theorem 2. Under Assumptions 1-3 with partial worker participation, the sequence of outputs $\{\mathbf{x}_k\}$ generated by Algorithm 1 with constant learning rates $\eta$ and $\eta_L$ satisfies:
|
| 308 |
+
|
| 309 |
+
$$
|
| 310 |
+
\min _ {t \in [ T ]} \mathbb {E} \left[ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} \right] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 311 |
+
$$
|
| 312 |
+
|
| 313 |
+
where $f_0 = f(\mathbf{x}_0)$ , $f_* = f(\mathbf{x}_*)$ , and the expectation is over the local dataset samples among workers.
|
| 314 |
+
|
| 315 |
+
For sampling Strategy 1, let $\eta$ and $\eta_L$ be chosen such that $\eta_L \leq \frac{1}{8LK}$ , $\eta \eta_{L}KL \leq \frac{n - 1}{n}$ and $30K^2\eta_L^2 L^2 - \frac{L\eta \eta_{L}}{n} (90K^3 L^2\eta_L^2 + 3K) < 1$ . It then holds that:
|
| 316 |
+
|
| 317 |
+
$$
|
| 318 |
+
\Phi \triangleq \frac {1}{c} \left[ \frac {L \eta \eta_ {L}}{2 n} \sigma_ {L} ^ {2} + \frac {3 L K \eta \eta_ {L}}{2 n} \sigma_ {G} ^ {2} + (\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + \frac {1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3}}{2 n}) (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \right].
|
| 319 |
+
$$
|
| 320 |
+
|
| 321 |
+
For sampling Strategy 2, let $\eta$ and $\eta_L$ be chosen such that $\eta_L \leq \frac{1}{8LK}$ , $\eta \eta_L KL \leq \frac{n(m-1)}{m(n-1)}$ and $10K^2 \eta_L^2 L^2 - L \eta \eta_L \frac{m-n}{n(m-1)} (90K^3 \eta_L^2 L^2 + 3K) < 1$ . It then holds that:
|
| 322 |
+
|
| 323 |
+
$$
|
| 324 |
+
\Phi \triangleq \frac {1}{c} \bigg [ \frac {L \eta \eta_ {L}}{2 n} \sigma_ {L} ^ {2} + 3 L K \eta \eta_ {L} \frac {m - n}{2 n (m - 1)} \sigma_ {G} ^ {2} + \bigg (\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - n}{2 n (m - 1)} \bigg) (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \bigg ].
|
| 325 |
+
$$
|
| 326 |
+
|
| 327 |
+
Proof. Let $\bar{\Delta}_t$ be defined the same as in the proof of Theorem 1. Under partial device participation, note that $\bar{\Delta}_t \neq \Delta_t$ (recall that $\bar{\Delta}_t \triangleq \frac{1}{m} \sum_{i=1}^{m} \Delta_t^i$ , $\Delta_t = \frac{1}{n} \sum_{i \in S_t} \Delta_t^i$ , and $|S_t| = n$ ). The randomness for partial worker participation contains two parts: the random sampling and the stochastic gradient. We still use $\mathbb{E}_t[\cdot]$ to represent the expectation with respect to both types of randomness.
|
| 328 |
+
|
| 329 |
+
Due to the smoothness assumption in Assumption 1, taking expectation of $f(\mathbf{x}_{t + 1})$ over the randomness at communication round t:
|
| 330 |
+
|
| 331 |
+
$$
|
| 332 |
+
\begin{array}{l} \mathbb {E} _ {t} \left[ f \left(\mathbf {x} _ {t + 1}\right) \right] \leq f \left(\mathbf {x} _ {t}\right) + \left\langle \nabla f \left(\mathbf {x} _ {t}\right), \mathbb {E} _ {t} \left[ \mathbf {x} _ {t + 1} - \mathbf {x} _ {t} \right] \right\rangle + \frac {L}{2} \mathbb {E} _ {t} \left[ \| \mathbf {x} _ {t + 1} - \mathbf {x} _ {t} \| ^ {2} \right] \\ = f (\mathbf {x} _ {t}) + \left\langle \nabla f (\mathbf {x} _ {t}), \mathbb {E} _ {t} \left[ \eta \Delta_ {t} + \eta \eta_ {L} K \nabla f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \nabla f (\mathbf {x} _ {t}) \right] \right\rangle + \frac {L}{2} \eta^ {2} \mathbb {E} _ {t} [ \| \Delta_ {t} \| ^ {2} ] \\ = f \left(\mathbf {x} _ {t}\right) - \eta \eta_ {L} K \| \nabla f \left(\mathbf {x} _ {t}\right) \| ^ {2} + \eta \underbrace {\left\langle \nabla f \left(\mathbf {x} _ {t}\right) , \mathbb {E} _ {t} \left[ \Delta_ {t} + \eta_ {L} K \nabla f \left(\mathbf {x} _ {t}\right) \right] \right\rangle} _ {A _ {1} ^ {\prime}} + \frac {L}{2} \eta^ {2} \underbrace {\mathbb {E} _ {t} [ \| \Delta_ {t} \| ^ {2} ]} _ {A _ {2} ^ {\prime}} \tag {4} \\ \end{array}
|
| 333 |
+
$$
|
| 334 |
+
|
| 335 |
+
The term $A_1'$ in (4) can be bounded as follows: Since $\mathbb{E}_{S_t}[A_1'] = A_1$ due to Lemma 1 for both sampling strategies, we have the same bound as in inequality 2 for $A_1'$ :
|
| 336 |
+
|
| 337 |
+
$$
|
| 338 |
+
\begin{array}{l} A _ {1} ^ {\prime} \leq \eta_ {L} K (\frac {1}{2} + 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}) \| \nabla f (x _ {t}) \| ^ {2} + \frac {5 K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \\ - \frac {\eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, k} ^ {i}\right) \right\| ^ {2}, \tag {5} \\ \end{array}
|
| 339 |
+
$$
|
| 340 |
+
|
| 341 |
+
For strategy 1: We can bound $A_2'$ in (4) as follows.
|
| 342 |
+
|
| 343 |
+
Note $S_{t}$ is an index set (multiset) for independent sampling (equal probability) with replacement in which some elements may have the same value. Suppose $S_{t} = \{l_{1},\ldots ,l_{n}\}$ .
|
| 344 |
+
|
| 345 |
+
$$
|
| 346 |
+
\begin{array}{l} A _ {2} ^ {\prime} = \mathbb {E} _ {t} [ \| \Delta_ {t} \| ^ {2} ] \\ = \mathbb {E} _ {t} \left[ \left\| \frac {1}{n} \sum_ {i \in S _ {t}} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ \end{array}
|
| 347 |
+
$$
|
| 348 |
+
|
| 349 |
+
$$
|
| 350 |
+
\begin{array}{l} = \frac {1}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i \in S _ {t}} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ = \frac {1}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {z = 1} ^ {n} \Delta_ {t} ^ {l _ {z}} \right\| ^ {2} \right] \\ \stackrel {(b 1)} {=} \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {z = 1} ^ {n} \sum_ {j = 0} ^ {K - 1} \left[ \mathbf {g} _ {t, j} ^ {l _ {z}} - \nabla F _ {l _ {z}} (\mathbf {x} _ {t, j} ^ {l _ {z}}) \right] \right\| ^ {2} \right] + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {z = 1} ^ {n} \sum_ {j = 0} ^ {K - 1} \nabla F _ {l _ {z}} (\mathbf {x} _ {t, j} ^ {l _ {z}}) \right\| ^ {2} \right] \\ \stackrel {(b 2)} {\leq} \frac {K \eta_ {L} ^ {2}}{n} \sigma_ {L} ^ {2} + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {z = 1} ^ {n} \sum_ {j = 0} ^ {K - 1} \nabla F _ {l _ {z}} (\mathbf {x} _ {t, j} ^ {l _ {z}}) \right\| ^ {2} \right], \\ \end{array}
|
| 351 |
+
$$
|
| 352 |
+
|
| 353 |
+
where (b1) follows from the fact that $\mathbb{E}[\| \mathbf{x}\| ^2 ] = \mathbb{E}[\| \mathbf{x} - \mathbb{E}[\mathbf{x}]\| ^2 ] + \| \mathbb{E}[\mathbf{x}]\| ^2$ and (b2) is due to the bounded variance assumption in Assumption 3 and the fact that $\mathbb{E}[\| x_1 + \dots +x_n\| ^2 ]\leq n\mathbb{E}[\| x_1\| ^2 +\dots +\| x_n\| ^2 ]$.
|
| 354 |
+
|
| 355 |
+
By letting $\mathbf{t}_i = \sum_{j=0}^{K-1} \nabla F_i(\mathbf{x}_{t,j}^i)$ , we have:
|
| 356 |
+
|
| 357 |
+
$$
|
| 358 |
+
\begin{array}{l} \mathbb {E} _ {t} \left[ \right.\left\| \sum_ {z = 1} ^ {n} \sum_ {j = 0} ^ {K - 1} \nabla F _ {l _ {z}} \left(\mathbf {x} _ {t, j} ^ {l _ {z}}\right)\right\| ^ {2} = \mathbb {E} _ {t} \left[\left\| \sum_ {z = 1} ^ {n} \mathbf {t} _ {l _ {z}} \right\| ^ {2} \right] \\ = \mathbb {E} _ {t} \left[ \sum_ {z = 1} ^ {n} \| \mathbf {t} _ {l _ {z}} \| ^ {2} + \sum_ {i \neq j; l _ {i}, l _ {j} \in S _ {t}} \left\langle \mathbf {t} _ {l _ {i}}, \mathbf {t} _ {l _ {j}} \right\rangle \right] \\ \stackrel {(b 3)} {=} \mathbb {E} _ {t} \left[ n \| \mathbf {t} _ {l _ {1}} \| ^ {2} + n (n - 1) \langle \mathbf {t} _ {l _ {1}}, \mathbf {t} _ {l _ {2}} \rangle \right] \\ = \frac {n}{m} \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2} + \frac {n (n - 1)}{m ^ {2}} \sum_ {i, j \in [ m ]} \left\langle \mathbf {t} _ {i}, \mathbf {t} _ {j} \right\rangle \\ = \frac {n}{m} \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2} + \frac {n (n - 1)}{m ^ {2}} \| \sum_ {i = 1} ^ {m} \mathbf {t} _ {i} \| ^ {2}, \\ \end{array}
|
| 359 |
+
$$
|
| 360 |
+
|
| 361 |
+
where $(b3)$ is due to the independent sampling with replacement.
|
| 362 |
+
|
| 363 |
+
So we can bound $A_2'$ as follows.
|
| 364 |
+
|
| 365 |
+
$$
|
| 366 |
+
\begin{array}{l} A _ {2} ^ {\prime} = \mathbb {E} _ {t} [ \| \Delta_ {t} \| ^ {2} ] \\ \leq \frac {K \eta_ {L} ^ {2}}{n} \sigma_ {L} ^ {2} + \frac {\eta_ {L} ^ {2}}{m n} \sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \| \mathbf {t} _ {i} \| ^ {2} + \frac {(n - 1) \eta_ {L} ^ {2}}{m ^ {2} n} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \mathbf {t} _ {i} \right\| ^ {2}, \tag {6} \\ \end{array}
|
| 367 |
+
$$
|
| 368 |
+
|
| 369 |
+
For $\mathbf{t}_i$ , we have:
|
| 370 |
+
|
| 371 |
+
$$
|
| 372 |
+
\begin{array}{l} \sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \| \mathbf {t} _ {i} \| ^ {2} = \sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \left\| \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, j} ^ {i}\right) - \nabla F _ {i} (\mathbf {x} _ {t}) + \nabla F _ {i} (\mathbf {x} _ {t}) - \nabla f (\mathbf {x} _ {t}) + \nabla f (\mathbf {x} _ {t}) \right\| ^ {2} \\ \stackrel {(b 4)} {\leq} 3 K L ^ {2} \sum_ {i = 1} ^ {m} \sum_ {j = 0} ^ {K - 1} \mathbb {E} _ {t} \left\| \mathbf {x} _ {t, j} ^ {i} - \mathbf {x} _ {t} \right\| ^ {2} + 3 m K ^ {2} \sigma_ {G} ^ {2} + 3 m K ^ {2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ \stackrel {(b 5)} {\leq} 1 5 m K ^ {3} L ^ {2} \eta_ {L} ^ {2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) + \left(9 0 m K ^ {4} L ^ {2} \eta_ {L} ^ {2} + 3 m K ^ {2}\right) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + 3 m K ^ {2} \sigma_ {G} ^ {2}, \tag {7} \\ \end{array}
|
| 373 |
+
$$
|
| 374 |
+
|
| 375 |
+
where (b4) is due to the fact that $\mathbb{E}[\| x_1 + \dots +x_n\|^2 ]\leq n\mathbb{E}[\| x_1\|^2 +\dots +\| x_n\|^2 ]$ , Assumptions 3 and 1, and (b5) follows from Lemma 2.
|
| 376 |
+
|
| 377 |
+
Substituting the inequalities in (5) of $A_1'$ and (6) of $A_2'$ into inequality (4), we have:
|
| 378 |
+
|
| 379 |
+
$$
|
| 380 |
+
\begin{array}{l} \mathbb{E}_{t}[f(\mathbf{x}_{t + 1})]\leq f(\mathbf{x}_{t}) - \eta \eta_{L}K\| \nabla f(\mathbf{x}_{t})\|^{2} + \eta \underbrace{\left\langle\nabla f(\mathbf{x}_{t}),\mathbb{E}_{t}[\Delta_{t} + \eta_{L}K\nabla f(\mathbf{x}_{t})]\right\rangle}_{A_{1}^{\prime}} + \frac{L}{2}\eta^{2}\underbrace{\mathbb{E}_{t}[\| \Delta_{t}\|^{2}]}_{A_{2}^{\prime}} \\ \leq f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \left(\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}\right) \| \nabla f (x _ {t}) \| ^ {2} + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ + \left[ \frac {(n - 1) L \eta^ {2} \eta_ {L} ^ {2}}{2 m ^ {2} n} - \frac {\eta \eta_ {L}}{2 K m ^ {2}} \right] \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \mathbf {t} _ {i} \right\| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m n} \sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \| \mathbf {t} _ {i} \| ^ {2} \\ \stackrel {(b 6)} {\leq} f (\mathbf {x} _ {t}) - \eta \eta_ {L} K (\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}) \| \nabla f (x _ {t}) \| ^ {2} + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \\ + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m n} \sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \| \mathbf {t} _ {i} \| ^ {2} \\ \stackrel {(b 7)} {\leq} f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \left(\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2} - \frac {L \eta \eta_ {L}}{2 n} \left(9 0 K ^ {3} L ^ {2} \eta_ {L} ^ {2} + 3 K\right)\right) \| \nabla f (x _ {t}) \| ^ {2} \\ + \left[ \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} + \frac {1 5 K ^ {3} L ^ {3} \eta^ {2} \eta_ {L} ^ {4}}{2 n} \right] \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {3 K ^ {2} L \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {G} ^ {2} \\ \stackrel 
{(b 8)} {\leq} f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {3 K ^ {2} L \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left[ \frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + \frac {1 5 K ^ {2} \eta_ {L} ^ {3} \eta L ^ {3}}{2 n} \right] \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right), \tag {8} \\ \end{array}
|
| 381 |
+
$$
|
| 382 |
+
|
| 383 |
+
where (b6) follows from $\frac{(n - 1)L\eta^2\eta_L^2}{2m^2n} -\frac{\eta\eta_L}{2Km^2}\leq 0$ if $\eta \eta_{L}KL\leq \frac{n - 1}{n}$ , (b7) is due to inequality (7) and (b8) holds since there exists a constant $c > 0$ such that $\left[\frac{1}{2} -15K^{2}\eta_{L}^{2}L^{2} - \frac{L\eta\eta_{L}}{2n} (90K^{3}L^{2}\eta_{L}^{2} + 3K)\right]>$ $c > 0$ if $30K^{2}\eta_{L}^{2}L^{2} - \frac{L\eta\eta_{L}}{n} (90K^{3}L^{2}\eta_{L}^{2} + 3K) < 1.$
|
| 384 |
+
|
| 385 |
+
Note that the requirement of $|S_t| = n$ can be relaxed to $|S_t| \geq n$ . With $p_t \geq n$ workers in the $t$ -th communication round, inequality (8) becomes
|
| 386 |
+
|
| 387 |
+
$$
|
| 388 |
+
\begin{array}{l} \mathbb {E} _ {t} [ f (\mathbf {x} _ {t + 1}) ] \leq f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 p _ {t}} \sigma_ {L} ^ {2} + \frac {3 K L \eta^ {2} \eta_ {L} ^ {2}}{2 p _ {t}} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left[ \frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + \frac {1 5 K \eta_ {L} ^ {3} \eta L ^ {3}}{2 p _ {t}} \right] \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ \leq f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {3 K ^ {2} L \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left[ \frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + \frac {1 5 K ^ {2} \eta_ {L} ^ {3} \eta L ^ {3}}{2 n} \right] (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}). \\ \end{array}
|
| 389 |
+
$$
|
| 390 |
+
|
| 391 |
+
That is, the same convergence rate can be guaranteed if at least $n$ workers participate in each communication round (the number need not be exactly $n$ ).
|
| 392 |
+
|
| 393 |
+
Rearranging and summing from $t = 0, \dots, T - 1$ , we have the convergence for partial device participation with sampling strategy 1 as follows:
|
| 394 |
+
|
| 395 |
+
$$
|
| 396 |
+
\min _ {t \in [ T ]} \mathbb {E} [ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} ] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 397 |
+
$$
|
| 398 |
+
|
| 399 |
+
where $\Phi = \frac{1}{c}\left[\frac{L\eta\eta_{L}}{2n}\sigma_L^2 +\frac{3KL\eta\eta_L}{2n}\sigma_G^2 +\left(\frac{5K\eta_L^2L^2}{2} +\frac{15K^2\eta\eta_L^3L^3}{2n}\right)\left(\sigma_L^2 +6K\sigma_G^2\right)\right]$ and $c$ is a constant.
|
| 400 |
+
|
| 401 |
+
For strategy 2: Under the strategy of independent sampling with equal probability without replacement, we bound $A_2'$ as follows.
|
| 402 |
+
|
| 403 |
+
$$
|
| 404 |
+
\begin{array}{l} A _ {2} ^ {\prime} = \mathbb {E} _ {t} [ \| \Delta_ {t} \| ^ {2} ] \\ = \mathbb {E} _ {t} \left[ \left\| \frac {1}{n} \sum_ {i \in S _ {t}} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ \end{array}
|
| 405 |
+
$$
|
| 406 |
+
|
| 407 |
+
$$
|
| 408 |
+
\begin{array}{l} = \frac {1}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i \in S _ {t}} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ = \frac {1}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \mathbb {I} \{i \in S _ {t} \} \Delta_ {t} ^ {i} \right\| ^ {2} \right] \\ = \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \mathbb {I} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} [ \mathbf {g} _ {t, j} ^ {i} - \nabla F _ {i} (\mathbf {x} _ {t, j} ^ {i}) ] \right\| ^ {2} \right] + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \mathbb {I} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, j} ^ {i}) ] \right\| ^ {2} \right] \\ = \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \mathbb {P} \left\{i \in S _ {t} \right\} \sum_ {j = 0} ^ {K - 1} \left[ \mathbf {g} _ {t, j} ^ {i} - \nabla F _ {i} \left(\mathbf {x} _ {t, j} ^ {i}\right) \right] \right\| ^ {2} + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \left\| \sum_ {i = 1} ^ {m} \mathbb {I} \left\{i \in S _ {t} \right\} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, j} ^ {i}\right) \right\| ^ {2} \right] \\ \stackrel {(b 9)} {=} \frac {\eta_ {L} ^ {2}}{n m} \mathbb {E} _ {t} \left[ \sum_ {i = 1} ^ {m} \sum_ {j = 0} ^ {K - 1} \left\| \mathbf {g} _ {t, j} ^ {i} - \nabla F _ {i} (\mathbf {x} _ {t, j} ^ {i}) \right\| ^ {2} \right] + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \mathbb {E} _ {t} \left[ \left\| \sum_ {i = 1} ^ {m} \mathbb {I} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, j} ^ {i}) \right\| ^ {2} \right] \\ \stackrel {(b 1 0)} {\leq} \frac {K \eta_ {L} ^ {2}}{n} \sigma_ {L} ^ {2} + \frac {\eta_ {L} ^ {2}}{n ^ {2}} \left\| \sum_ {i = 1} ^ {m} \mathbb {P} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} \left(\mathbf {x} _ {t, j} ^ {i}\right) \right\| ^ {2}, \tag {9} \\ \end{array}
|
| 409 |
+
$$
|
| 410 |
+
|
| 411 |
+
where (b9) is due to the fact that $\mathbb{E}[\| x_1 + \dots +x_n\| ^2 ] = \mathbb{E}[\| x_1\| ^2 +\dots +\| x_n\| ^2 ]$ if the $x_{i}$ 's are independent with zero mean, $\mathbf{x}_i = \mathbf{g}_{t,j}^i -\nabla F_i(\mathbf{x}_{t,j}^i)$ is an independent random variable with zero mean, and $\mathbb{P}\{i\in S_t\} = \frac{n}{m}$ . (b10) is due to the bounded variance assumption in Assumption 3.
|
| 412 |
+
|
| 413 |
+
Substituting the inequalities in (5) of $A_1'$ and (9) of $A_2'$ into inequality (4), we have:
|
| 414 |
+
|
| 415 |
+
$$
|
| 416 |
+
\begin{array}{l} \mathbb{E}_{t}[f(\mathbf{x}_{t + 1})]\leq f(\mathbf{x}_{t}) - \eta \eta_{L}K\| \nabla f(\mathbf{x}_{t})\|^{2} + \eta \underbrace{\left\langle\nabla f(\mathbf{x}_{t}),\mathbb{E}_{t}[\Delta_{t} + \eta_{L}K\nabla f(\mathbf{x}_{t})]\right\rangle}_{A^{\prime}_{1}} + \frac{L}{2}\eta^{2}\underbrace{\mathbb{E}_{t}[\| \Delta_{t}\|^{2}]}_{A^{\prime}_{2}} \\ \leq f (\mathbf {x} _ {t}) - \eta \eta_ {L} K (\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2}) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + \frac {5 \eta K ^ {2} \eta_ {L} ^ {3} L ^ {2}}{2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) \\ + \underbrace {\frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 n ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \mathbb {P} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t , j} ^ {i}) \right\| ^ {2} - \frac {\eta \eta_ {L}}{2 K m ^ {2}} \mathbb {E} _ {t} \left\| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t , k} ^ {i}) \right\| ^ {2}} _ {A _ {3} ^ {\prime}}. \\ \end{array}
|
| 417 |
+
$$
|
| 418 |
+
|
| 419 |
+
Then we bound $A_3'$ as follows.
|
| 420 |
+
|
| 421 |
+
By letting $\mathbf{t}_i = \sum_{j=0}^{K-1} \nabla F_i(\mathbf{x}_{t,j}^i)$ , we have:
|
| 422 |
+
|
| 423 |
+
$$
|
| 424 |
+
\sum_ {i = 1} ^ {m} \mathbb {E} _ {t} \| \mathbf {t} _ {i} \| ^ {2} \leq 1 5 m K ^ {3} L ^ {2} \eta_ {L} ^ {2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) + \left(9 0 m K ^ {4} L ^ {2} \eta_ {L} ^ {2} + 3 m K ^ {2}\right) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + 3 m K ^ {2} \sigma_ {G} ^ {2}.
|
| 425 |
+
$$
|
| 426 |
+
|
| 427 |
+
It then follows that
|
| 428 |
+
|
| 429 |
+
$$
|
| 430 |
+
\| \sum_ {i = 1} ^ {m} \mathbf {t} _ {i} \| ^ {2} = \sum_ {i \in [ m ]} \| \mathbf {t} _ {i} \| ^ {2} + \sum_ {i \neq j} < \mathbf {t} _ {i}, \mathbf {t} _ {j} >
|
| 431 |
+
$$
|
| 432 |
+
|
| 433 |
+
$$
|
| 434 |
+
\stackrel {(b 1 1)} {=} \sum_ {i \in [ m ]} m \| \mathbf {t} _ {i} \| ^ {2} - \frac {1}{2} \sum_ {i \neq j} \| \mathbf {t} _ {i} - \mathbf {t} _ {j} \| ^ {2}
|
| 435 |
+
$$
|
| 436 |
+
|
| 437 |
+
$$
|
| 438 |
+
\begin{array}{l} \| \sum_ {i = 1} ^ {m} \mathbb {P} \{i \in S _ {t} \} \mathbf {t} _ {i} \| ^ {2} = \sum_ {i \in [ m ]} \mathbb {P} \{i \in S _ {t} \} \| \mathbf {t} _ {i} \| ^ {2} + \sum_ {i \neq j} \mathbb {P} \{i, j \in S _ {t} \} < \mathbf {t} _ {i}, \mathbf {t} _ {j} > \\ \stackrel {(b 1 2)} {=} \frac {n}{m} \sum_ {i \in [ m ]} \| \mathbf {t} _ {i} \| ^ {2} + \frac {n (n - 1)}{m (m - 1)} \sum_ {i \neq j} < \mathbf {t} _ {i}, \mathbf {t} _ {j} > \\ \end{array}
|
| 439 |
+
$$
|
| 440 |
+
|
| 441 |
+
$$
|
| 442 |
+
\stackrel {(b 1 3)} {=} \frac {n ^ {2}}{m} \sum_ {i \in [ m ]} \| \mathbf {t} _ {i} \| ^ {2} - \frac {n (n - 1)}{2 m (m - 1)} \sum_ {i \neq j} \| \mathbf {t} _ {i} - \mathbf {t} _ {j} \| ^ {2},
|
| 443 |
+
$$
|
| 444 |
+
|
| 445 |
+
where (b11) and (b13) are due to the fact that $\langle \mathbf{x},\mathbf{y}\rangle = \frac{1}{2} [||\mathbf{x}||^2 +||\mathbf{y}||^2 -||\mathbf{x} - \mathbf{y}||^2 ]\leq \frac{1}{2} [||\mathbf{x}||^2 +||\mathbf{y}||^2 ],$ (b12) follows from the fact that $\mathbb{P}\{i\in S_t\} = \frac{n}{m}$ and $\mathbb{P}\{i,j\in S_t\} = \frac{n(n - 1)}{m(m - 1)}$ . Therefore, we have
|
| 446 |
+
|
| 447 |
+
$$
|
| 448 |
+
\begin{array}{l} A _ {3} ^ {'} = \frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 n ^ {2}} \| \sum_ {i = 1} ^ {m} \mathbb {P} \{i \in S _ {t} \} \sum_ {j = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, j} ^ {i}) ] \| ^ {2} - \frac {\eta \eta_ {L}}{2 K m ^ {2}} \| \sum_ {i = 1} ^ {m} \sum_ {k = 0} ^ {K - 1} \nabla F _ {i} (\mathbf {x} _ {t, k} ^ {i}) \| ^ {2} \\ = \left(\frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m} - \frac {\eta \eta_ {L}}{2 K m}\right) \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2} + \left(\frac {\eta \eta_ {L}}{4 K m ^ {2}} - \frac {L \eta^ {2} \eta_ {L} ^ {2} (n - 1)}{4 m n (m - 1)}\right) \sum_ {i \neq j} \| \mathbf {t} _ {i} - \mathbf {t} _ {j} \| ^ {2} \\ \stackrel {(b 1 4)} {=} \left(\frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m} - \frac {L \eta^ {2} \eta_ {L} ^ {2} (n - 1)}{2 n (m - 1)}\right) \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2} - \left(\frac {\eta \eta_ {L}}{2 K m ^ {2}} - \frac {L \eta^ {2} \eta_ {L} ^ {2} (n - 1)}{2 m n (m - 1)}\right) \| \sum_ {i \in [ m ]} \mathbf {t} _ {i} \| ^ {2} \\ \stackrel {(b 1 5)} {\leq} \left(\frac {L \eta^ {2} \eta_ {L} ^ {2}}{2 m} - \frac {L \eta^ {2} \eta_ {L} ^ {2} (n - 1)}{2 n (m - 1)}\right) \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2} \\ = L \eta^ {2} \eta_ {L} ^ {2} \frac {m - n}{2 m n (m - 1)} \sum_ {i = 1} ^ {m} \| \mathbf {t} _ {i} \| ^ {2}, \\ \end{array}
|
| 449 |
+
$$
|
| 450 |
+
|
| 451 |
+
where (b14) follows from the fact that $\| \sum_{i\in [m]}\mathbf{t}_i\|^2 = \sum_{i\in [m]}m\| \mathbf{t}_i\|^2 -\frac{1}{2}\sum_{i\neq j}\| \mathbf{t}_i - \mathbf{t}_j\|^2$ and (b15) is due to the fact that $(\frac{\eta\eta_L}{2Km^2} -\frac{L\eta^2\eta_L^2(n - 1)}{2mn(m - 1)})\geq 0$ if $\eta \eta_LKL\leq \frac{n(m - 1)}{m(n - 1)}$
|
| 452 |
+
|
| 453 |
+
Then we have
|
| 454 |
+
|
| 455 |
+
$$
|
| 456 |
+
\begin{array}{l} \mathbb {E} _ {t} \left[ f (\mathbf {x} _ {t + 1}) \right] \leq f (\mathbf {x} _ {t}) - \eta \eta_ {L} K \left(\frac {1}{2} - 1 5 K ^ {2} \eta_ {L} ^ {2} L ^ {2} - L \eta \eta_ {L} \frac {m - n}{2 n (m - 1)} \left(9 0 K ^ {3} \eta_ {L} ^ {2} L ^ {2} + 3 K\right)\right) \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + 3 K ^ {2} L \eta^ {2} \eta_ {L} ^ {2} \frac {m - n}{2 n (m - 1)} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left(\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - n}{2 n (m - 1)}\right) \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ \stackrel {(b 1 6)} {\leq} f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + 3 K L \eta^ {2} \eta_ {L} ^ {2} \frac {m - n}{2 n (m - 1)} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left(\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - n}{2 n (m - 1)}\right) \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right), \tag {10} \\ \end{array}
|
| 457 |
+
$$
|
| 458 |
+
|
| 459 |
+
where (b16) holds because there exists a constant $c > 0$ satisfying $\left(\frac{1}{2} -15K^{2}\eta_{L}^{2}L^{2} - L\eta \eta_{L}\frac{m - n}{2n(m - 1)} (90K^{3}\eta_{L}^{2}L^{2} + 3K)\right) > c > 0$ if $30K^{2}\eta_{L}^{2}L^{2} + L\eta \eta_{L}\frac{m - n}{n(m - 1)} (90K^{3}\eta_{L}^{2}L^{2} + 3K) < 1$ .
|
| 460 |
+
|
| 461 |
+
Note that the requirement of $|S_t| = n$ can be relaxed to $|S_t| \geq n$ . With $p_t \geq n$ workers in the $t$ -th communication round, inequality (10) becomes
|
| 462 |
+
|
| 463 |
+
$$
|
| 464 |
+
\begin{array}{l} \mathbb {E} _ {t} [ f (\mathbf {x} _ {t + 1}) ] \leq f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 p _ {t}} \sigma_ {L} ^ {2} + 3 K L \eta^ {2} \eta_ {L} ^ {2} \frac {m - p _ {t}}{2 p _ {t} (m - 1)} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left(\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - p _ {t}}{2 p _ {t} (m - 1)}\right) \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ \leq f (\mathbf {x} _ {t}) - c \eta \eta_ {L} K \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} + \frac {L K \eta^ {2} \eta_ {L} ^ {2}}{2 n} \sigma_ {L} ^ {2} + 3 K L \eta^ {2} \eta_ {L} ^ {2} \frac {m - n}{2 n (m - 1)} \sigma_ {G} ^ {2} \\ + \eta \eta_ {L} K \left(\frac {5 K \eta_ {L} ^ {2} L ^ {2}}{2} + 1 5 K ^ {2} \eta \eta_ {L} ^ {3} L ^ {3} \frac {m - n}{2 n (m - 1)}\right) \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) \\ \end{array}
|
| 465 |
+
$$
|
| 466 |
+
|
| 467 |
+
That is, the same convergence rate can be guaranteed if at least $n$ workers participate in each communication round (the number need not be exactly $n$ ).
|
| 468 |
+
|
| 469 |
+
Rearranging and summing from $t = 0, \dots, T - 1$ , we have the convergence for partial device participation with sampling strategy 2 as follows:
|
| 470 |
+
|
| 471 |
+
$$
|
| 472 |
+
\min _ {t \in [ T ]} \mathbb {E} \left[ \| \nabla f (\mathbf {x} _ {t}) \| _ {2} ^ {2} \right] \leq \frac {f _ {0} - f _ {*}}{c \eta \eta_ {L} K T} + \Phi ,
|
| 473 |
+
$$
|
| 474 |
+
|
| 475 |
+
where $\Phi = \frac{1}{c}\left[\frac{L\eta\eta_L}{2n}\sigma_L^2 +3KL\eta \eta_L\frac{m - n}{2n(m - 1)}\sigma_G^2 +\left(\frac{5K\eta_L^2L^2}{2} +15K^2\eta \eta_L^3 L^3\frac{m - n}{2n(m - 1)}\right)(\sigma_L^2 +6K\sigma_G^2)\right]$ and $c$ is a constant. This completes the proof.
|
| 476 |
+
|
| 477 |
+
# A.2.1 KEY LEMMAS
|
| 478 |
+
|
| 479 |
+
Lemma 1 (Unbiased Sampling). For strategies 1 and 2, the estimator $\Delta_t$ is unbiased, i.e.,
|
| 480 |
+
|
| 481 |
+
$$
|
| 482 |
+
\mathbb {E} _ {S _ {t}} \left[ \Delta_ {t} \right] = \bar {\Delta} _ {t}.
|
| 483 |
+
$$
|
| 484 |
+
|
| 485 |
+
Proof of Lemma 1.
|
| 486 |
+
|
| 487 |
+
Let $S_{t} = \{t_{1},\dots ,t_{n}\}$ with size $n$ . For both sampling strategies 1 and 2, the sampling distribution of each worker is identical. Then we have:
|
| 488 |
+
|
| 489 |
+
$$
|
| 490 |
+
\mathbb {E} _ {S _ {t}} \left[ \Delta_ {t} \right] = \frac {1}{n} \mathbb {E} _ {S _ {t}} \left[ \sum_ {t _ {i} \in S _ {t}} \Delta_ {t} ^ {t _ {i}} \right] = \frac {1}{n} \mathbb {E} _ {S _ {t}} \left[ \sum_ {i = 1} ^ {n} \Delta_ {t} ^ {t _ {i}} \right] = \mathbb {E} _ {S _ {t}} \left[ \Delta_ {t} ^ {t _ {1}} \right] = \frac {1}{m} \sum_ {i = 1} ^ {m} \Delta_ {t} ^ {i} = \bar {\Delta} _ {t}.
|
| 491 |
+
$$
|
| 492 |
+
|
| 493 |
+
# A.3 AUXILIARY LEMMAS
|
| 494 |
+
|
| 495 |
+
Lemma 2 (Lemma 4 in Reddi et al. (2020)). For any step-size satisfying $\eta_L \leq \frac{1}{8LK}$ , we can have the following results:
|
| 496 |
+
|
| 497 |
+
$$
|
| 498 |
+
\frac {1}{m} \sum_ {i = 1} ^ {m} \mathbb {E} \left[ \left\| \mathbf {x} _ {t, k} ^ {i} - \mathbf {x} _ {t} \right\| ^ {2} \right] \leq 5 K \eta_ {L} ^ {2} \left(\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}\right) + 3 0 K ^ {2} \eta_ {L} ^ {2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2}.
|
| 499 |
+
$$
|
| 500 |
+
|
| 501 |
+
Proof. In order for this paper to be self-contained, we restate the proof of Lemma 4 in (Reddi et al., 2020) here.
|
| 502 |
+
|
| 503 |
+
For any worker $i \in [m]$ and $k \in [K]$ , we have:
|
| 504 |
+
|
| 505 |
+
$$
|
| 506 |
+
\begin{array}{l} \mathbb {E} [ \| \mathbf {x} _ {t, k} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] = \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} - \eta_ {L} g _ {t, k - 1} ^ {i} \| ^ {2} ] \\ \leq \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} - \eta_ {L} (g _ {t, k - 1} ^ {i} - \nabla F _ {i} (\mathbf {x} _ {t, k - 1} ^ {i}) + \nabla F _ {i} (\mathbf {x} _ {t, k - 1} ^ {i}) - \nabla F _ {i} (\mathbf {x} _ {t}) + \nabla F _ {i} (\mathbf {x} _ {t}) - \nabla f (\mathbf {x} _ {t}) + \nabla f (\mathbf {x} _ {t})) \| ^ {2} ] \\ \leq (1 + \frac {1}{2 K - 1}) \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] + \mathbb {E} [ \| \eta_ {L} (g _ {t, k - 1} ^ {i} - \nabla F _ {i} (\mathbf {x} _ {t, k - 1} ^ {i})) \| ^ {2} ] \\ + 6 K \mathbb {E} [ \| \eta_ {L} (\nabla F _ {i} (\mathbf {x} _ {t, k - 1} ^ {i}) - \nabla F _ {i} (\mathbf {x} _ {t})) \| ^ {2} ] + 6 K \mathbb {E} [ \| \eta_ {L} (\nabla F _ {i} (\mathbf {x} _ {t}) - \nabla f (\mathbf {x} _ {t})) \| ^ {2} ] + 6 K \| \eta_ {L} \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ \leq (1 + \frac {1}{2 K - 1}) \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] + \eta_ {L} ^ {2} \sigma_ {L} ^ {2} + 6 K \eta_ {L} ^ {2} L ^ {2} \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] + 6 K \eta_ {L} ^ {2} \sigma_ {G} ^ {2} + 6 K \| \eta_ {L} \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ = (1 + \frac {1}{2 K - 1} + 6 K \eta_ {L} ^ {2} L ^ {2}) \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] + \eta_ {L} ^ {2} \sigma_ {L} ^ {2} + 6 K \eta_ {L} ^ {2} \sigma_ {G} ^ {2} + 6 K \| \eta_ {L} \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ \leq (1 + \frac {1}{K - 1}) \mathbb {E} [ \| \mathbf {x} _ {t, k - 1} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] + \eta_ {L} ^ {2} \sigma_ {L} ^ {2} + 6 K \eta_ {L} ^ {2} \sigma_ {G} ^ {2} + 6 K \| \eta_ {L} \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ \end{array}
|
| 507 |
+
$$
|
| 508 |
+
|
| 509 |
+
Unrolling the recursion, we get:
|
| 510 |
+
|
| 511 |
+
$$
|
| 512 |
+
\begin{array}{l} \frac {1}{m} \sum_ {i = 1} ^ {m} \mathbb {E} [ \| \mathbf {x} _ {t, k} ^ {i} - \mathbf {x} _ {t} \| ^ {2} ] \leq \sum_ {p = 0} ^ {k - 1} (1 + \frac {1}{K - 1}) ^ {p} [ \eta_ {L} ^ {2} \sigma_ {L} ^ {2} + 6 K \eta_ {L} ^ {2} \sigma_ {G} ^ {2} + 6 K \eta_ {L} ^ {2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} ] \\ \leq (K - 1) [ (1 + \frac {1}{K - 1}) ^ {K} - 1 ] [ \eta_ {L} ^ {2} \sigma_ {L} ^ {2} + 6 K \eta_ {L} ^ {2} \sigma_ {G} ^ {2} + 6 K \eta_ {L} ^ {2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} ] \\ \leq 5 K \eta_ {L} ^ {2} (\sigma_ {L} ^ {2} + 6 K \sigma_ {G} ^ {2}) + 3 0 K ^ {2} \eta_ {L} ^ {2} \| \nabla f (\mathbf {x} _ {t}) \| ^ {2} \\ \end{array}
|
| 513 |
+
$$
|
| 514 |
+
|
| 515 |
+
This completes the proof.
|
| 516 |
+
|
| 517 |
+
# B APPENDIX II: EXPERIMENTS
|
| 518 |
+
|
| 519 |
+
We provide the full details of the experiments. We use non-i.i.d. versions of MNIST and CIFAR-10, which are described as follows:
|
| 520 |
+
|
| 521 |
+
# B.1 MNIST
|
| 522 |
+
|
| 523 |
+
We study image classification of handwritten digits 0-9 in MNIST and modify the MNIST dataset to a non-i.i.d. version.
|
| 524 |
+
|
| 525 |
+
To impose statistical heterogeneity, we split the data based on the digits $(p)$ they contain in their dataset. We distribute the data to $m = 100$ workers such that each worker contains only a certain class of digits with the same number of training/test samples. For example, for $p = 1$ , each worker only has training/testing samples with one digit, which causes heterogeneity among different workers. For $p = 10$ , each worker has samples with 10 digits, which is essentially the i.i.d. case. In this way, we can use the number of digits in each worker's local dataset to represent the non-i.i.d. degree qualitatively. In each communication round, 100 workers run $K$ epochs locally in parallel and then the server samples $n$ workers for aggregation and update. We conduct grid-search experiments over the hyper-parameters as shown in Table 3.
|
| 526 |
+
|
| 527 |
+
Table 3: Hyper-parameters Tuning.
|
| 528 |
+
|
| 529 |
+
<table><tr><td>Server Learning Rate</td><td>η ∈ {1, 10}</td></tr><tr><td>Client Learning Rate</td><td>ηL ∈ {0.001, 0.01, 0.1}</td></tr><tr><td>Local Epochs</td><td>K ∈ {1, 5, 10}</td></tr><tr><td>Clients Partition Number</td><td>n ∈ {10, 50, 100}</td></tr><tr><td>Non-i.i.d. Degree</td><td>p ∈ {1, 2, 5, 10}</td></tr></table>
|
| 530 |
+
|
| 531 |
+
We run three models: multinomial logistic regression, fully-connected network with two hidden layers (2NN) (two 200 neurons hidden layers with ReLU followed by an output layer), convolutional neural network (CNN), as shown in Table 4. The results are shown in Figures 2, 3 and 4.
|
| 532 |
+
|
| 533 |
+
Table 4: CNN Architecture for MNIST.
|
| 534 |
+
|
| 535 |
+
<table><tr><td>Layer Type</td><td>Size</td></tr><tr><td>Convolution + ReLu</td><td>5 × 5 × 32</td></tr><tr><td>Max Pooling</td><td>2 × 2</td></tr><tr><td>Convolution + ReLu</td><td>5 × 5 × 64</td></tr><tr><td>Max Pooling</td><td>2 × 2</td></tr><tr><td>Fully Connected + ReLu</td><td>1024 × 512</td></tr><tr><td>Fully Connected</td><td>512 × 10</td></tr></table>
|
| 536 |
+
|
| 537 |
+
# B.2 CIFAR-10
|
| 538 |
+
|
| 539 |
+
Unless stated otherwise, we use the following default parameter setting: the server learning rate and client learning rate are set to $\eta = 1.0$ and $\eta_{L} = 0.1$ , respectively. The number of local epochs is set to $K = 10$ . The total number of clients is set to 100, and the clients partition number is set to $n = 10$ . We use the same strategy to distribute the data over clients as suggested in McMahan et al. (2016). For the i.i.d. setting, we evenly partition all the training data among all clients, i.e., each client observes 500 data samples; for the non-i.i.d. setting, we first sort the training data by label, then divide all the training data into 200 shards of size 250, and randomly assign two shards to each client. For the CIFAR-10 dataset, we train our classifier with the ResNet model. The results are shown in Figure 5 and Figure 6.
|
| 540 |
+
|
| 541 |
+
# B.3 DISCUSSION
|
| 542 |
+
|
| 543 |
+
Impact of non-i.i.d. datasets: Figure 2 shows the results of training loss (top) and test accuracy (bottom) for three models under different non-i.i.d. datasets with full and partial worker participation
|
| 544 |
+
|
| 545 |
+

|
| 546 |
+
|
| 547 |
+

|
| 548 |
+
|
| 549 |
+

|
| 550 |
+
|
| 551 |
+

|
| 552 |
+
(a) LR
|
| 553 |
+
|
| 554 |
+

|
| 555 |
+
(b) 2NN
|
| 556 |
+
|
| 557 |
+

|
| 558 |
+
(c) CNN
|
| 559 |
+
|
| 560 |
+

|
| 561 |
+
Figure 2: Training loss (top) and test accuracy (bottom) for three models on MNIST with hyperparameters setting: local learning rate 0.1, global learning rate 1.0, local steps 5 epochs.
|
| 562 |
+
|
| 563 |
+

|
| 564 |
+
|
| 565 |
+

|
| 566 |
+
|
| 567 |
+

|
| 568 |
+
(a) LR
|
| 569 |
+
|
| 570 |
+

|
| 571 |
+
(b) 2NN
|
| 572 |
+
|
| 573 |
+

|
| 574 |
+
(c) CNN
|
| 575 |
+
Figure 3: Training loss (top) and test accuracy (bottom) for three models on MNIST with hyperparameters setting: local learning rate 0.1, global learning rate 1.0, worker number 100.
|
| 576 |
+
|
| 577 |
+
on MNIST. We can see that the FedAvg algorithm converges under non-i.i.d. datasets with a proper learning rate choice in these cases. We believe that the major challenge in FL is the non-i.i.d. datasets. For datasets with a lower degree of non-i.i.d., the FedAvg algorithm can achieve a good result compared with the i.i.d. case. For example, when the local dataset in each worker has five digits $(p = 5)$ with full (partial) worker participation, the FedAvg algorithm achieves a convergence speed comparable with that of the i.i.d. case $(p = 10)$ . This result can be observed in Figure 2 for all three models. As the degree of non-i.i.d. increases, its negative impact on the convergence becomes more obvious. The higher the degree of non-i.i.d., the slower the convergence speed. As the non-i.i.d. degree increases (from case $p = 10$ to case $p = 1$ ), it is obvious that the training loss is increasing and the test accuracy is decreasing. For datasets with a high degree of non-i.i.d., the convergence curves oscillate and are highly unstable. This trend is more obvious for complex models such as the CNN in Figure 2(c).
|
| 578 |
+
|
| 579 |
+
Impact of worker number: For full worker participation, the server can have an accurate estimation of the system heterogeneity after receiving the updates for all workers and neutralize this heterogeneity in each communication round. However, partial worker participation introduces another source of randomness, which leads to zigzagging convergence curves and slower convergence. In each
|
| 580 |
+
|
| 581 |
+

|
| 582 |
+
|
| 583 |
+

|
| 584 |
+
|
| 585 |
+

|
| 586 |
+
|
| 587 |
+

|
| 588 |
+
(a) LR
|
| 589 |
+
|
| 590 |
+

|
| 591 |
+
(b) 2NN
|
| 592 |
+
|
| 593 |
+

|
| 594 |
+
(c) CNN
|
| 595 |
+
|
| 596 |
+

|
| 597 |
+
Figure 4: Training loss (top) and test accuracy (bottom) for three models on MNIST with hyperparameters setting: local learning rate 0.1, global learning rate 1.0, worker number 10.
|
| 598 |
+
(a) IID.
|
| 599 |
+
|
| 600 |
+

|
| 601 |
+
(b) Non-IID.
|
| 602 |
+
|
| 603 |
+

|
| 604 |
+
Figure 5: Test accuracy with respect to worker number on CIFAR-10 dataset.
|
| 605 |
+
(a) IID.
|
| 606 |
+
Figure 6: Test accuracy with respect to different local steps on CIFAR-10 dataset.
|
| 607 |
+
|
| 608 |
+

|
| 609 |
+
(b) Non-IID.
|
| 610 |
+
|
| 611 |
+
communication round, the server can only receive a subset of workers based on the sampling strategy. So the server could only have a coarse estimation of the system heterogeneity and might not be able to neutralize the heterogeneity among different workers for partial worker participation. This problem is more prominent for highly non-i.i.d. datasets. It is not unlikely that the digits in these datasets among all active workers are only a proper subset of the total 10 digits in the original MNIST dataset, especially with highly non-i.i.d. datasets. For example, for $p = 1$ with 10 workers in each
|
| 612 |
+
|
| 613 |
+
communication round, it is highly likely that the datasets formed by these ten workers only includes certain small number of digits (say, 4 or 5) rather than total 10 digits. But for $p = 5$ , it is the opposite, that is, the digits in these datasets among these 10 workers are highly likely to be 10. So in each communication round, the server can mitigate system heterogeneity since it covers the training samples with all 10 digits. This trend is more obvious for complex models and datasets given the dramatic drop of test accuracy in the result of CIFAR-10 in Figure 5.
|
| 614 |
+
|
| 615 |
+
The sampling strategy here is random sampling with equal probability without replacement. In practice, the workers need to be in certain states in order to be able to participate in FL (e.g., in charging or idle states, etc. (Eichner et al., 2019)). Therefore, care must be taken in sampling and enlisting workers in practice. We believe that the joint design of sampling schemes, the number of workers and the FedAvg algorithm will have a significant impact on the convergence, which needs further investigation.
|
| 616 |
+
|
| 617 |
+
Impact of local steps: Figure 3 and Figure 4 show the results of training loss (top) and test accuracy (bottom) for three models under different local steps with full and partial worker participation, respectively. Figure 6 shows the impact of local steps on CIFAR-10. One open question of FL is whether the local steps help the convergence or not. Li et al. (2019b) showed a convergence rate $\mathcal{O}\left(\frac{K}{T}\right)$ , i.e., the local steps may hurt the convergence for full and partial worker participation. In these two figures, we can see that local steps could help the convergence for both full and partial worker participation. However, they only have a slight effect on the convergence compared to the effects of non-i.i.d. datasets and the number of workers.
|
| 618 |
+
|
| 619 |
+
Comparison with SCAFFOLD: We compare SCAFFOLD (Karimireddy et al., 2019) with the generalized FedAvg algorithm in this paper in terms of communication rounds, total communication overhead and estimated wall-clock time to achieve a certain test accuracy in Table 2. We run the experiments using the same GPU (NVIDIA V100) to ensure the same conditions. Here, we give a specific comparison for these two algorithms under identical conditions. Note that we divide the total training time into two parts: the computation time when the worker trains the local model and the communication time when information is exchanged between the worker and the server. We only compare the computation time and communication time with a fixed bandwidth of $20MB / s$ for both uploading and downloading connections. As shown in Figure 7, to achieve $\epsilon = 75\%$ , SCAFFOLD performs fewer communication rounds due to its variance reduction technique. That is, it spends less time on computation. However, it needs to communicate twice as much as FedAvg, since the control variate used to perform variance reduction on each worker needs to be updated in each round. In this way, the communication time is largely prolonged.
|
| 620 |
+
|
| 621 |
+

|
| 622 |
+
Figure 7: Wall-clock time to achieve test accuracy $\epsilon = 75\%$ on CIFAR-10 dataset.
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1e5d080e509febc72afdd8d26ff20ca07c35baf50121be7a0ff243d6c6377cf2
|
| 3 |
+
size 1814058
|
achievinglinearspeedupwithpartialworkerparticipationinnoniidfederatedlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cdbe23db6c4abcc47529258388153552b5836566747b81d02ccea1b3fd64639f
|
| 3 |
+
size 842127
|
acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eadcb2606252cbbcdbf321185d889a528e6f820823aa772e9065a80089d06966
|
| 3 |
+
size 153176
|
acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ce8cf696b4b01a4ddb2ef33f67249fe363153f657ab34ccdb37231063773d557
|
| 3 |
+
size 177637
|
acritiqueofselfexpressivedeepsubspaceclustering/b7e280b4-6940-4a21-8b4c-09799d3f6573_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:66b8000512797d4d9ddf10de287d59e688c8c3db8755a8733577147808cc63d0
|
| 3 |
+
size 686990
|
acritiqueofselfexpressivedeepsubspaceclustering/full.md
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# A CRITIQUE OF SELF-EXPRESSIVE DEEP SUBSPACE CLUSTERING
|
| 2 |
+
|
| 3 |
+
Benjamin D. Haeffele
|
| 4 |
+
|
| 5 |
+
Mathematical Institute for Data Science Johns Hopkins University
|
| 6 |
+
|
| 7 |
+
Baltimore, MD, USA
|
| 8 |
+
|
| 9 |
+
bhaeffele@jhu.edu
|
| 10 |
+
|
| 11 |
+
Chong You
|
| 12 |
+
|
| 13 |
+
Department of Electrical Engineering and Computer Sciences
|
| 14 |
+
University of California, Berkeley
|
| 15 |
+
|
| 16 |
+
Berkeley, CA, USA
|
| 17 |
+
|
| 18 |
+
cyou@berkeley.edu
|
| 19 |
+
|
| 20 |
+
René Vidal
|
| 21 |
+
|
| 22 |
+
Department of Biomedical Engineering
|
| 23 |
+
|
| 24 |
+
Johns Hopkins University
|
| 25 |
+
|
| 26 |
+
Baltimore, MD, USA
|
| 27 |
+
|
| 28 |
+
# ABSTRACT
|
| 29 |
+
|
| 30 |
+
Subspace clustering is an unsupervised clustering technique designed to cluster data that is supported on a union of linear subspaces, with each subspace defining a cluster with dimension lower than the ambient space. Many existing formulations for this problem are based on exploiting the self-expressive property of linear subspaces, where any point within a subspace can be represented as linear combination of other points within the subspace. To extend this approach to data supported on a union of non-linear manifolds, numerous studies have proposed learning an embedding of the original data using a neural network which is regularized by a self-expressive loss function on the data in the embedded space to encourage a union of linear subspaces prior on the data in the embedded space. Here we show that there are a number of potential flaws with this approach which have not been adequately addressed in prior work. In particular, we show the model formulation is often ill-posed in that it can lead to a degenerate embedding of the data, which need not correspond to a union of subspaces at all and is poorly suited for clustering. We validate our theoretical results experimentally and also repeat prior experiments reported in the literature, where we conclude that a significant portion of the previously claimed performance benefits can be attributed to an ad-hoc post processing step rather than the deep subspace clustering model.
|
| 31 |
+
|
| 32 |
+
# 1 INTRODUCTION AND BACKGROUND
|
| 33 |
+
|
| 34 |
+
Subspace clustering is a classical unsupervised learning problem, where one wishes to segment a given dataset into a prescribed number of clusters, and each cluster is defined as a linear (or affine) subspace with dimension lower than the ambient space. There have been a wide variety of approaches proposed in the literature to solve this problem (Vidal et al., 2016), but a large family of state-of-the-art approaches are based on exploiting the self-expressive property of linear subspaces. That is, if a point lies in a linear subspace, then it can be represented as a linear combination of other points within the subspace. Based on this fact, a wide variety of methods have been proposed which, given a dataset $\mathbf{Z} \in \mathbb{R}^{d \times N}$ of $N$ $d$ -dimensional points, find a matrix of coefficients $\mathbf{C} \in \mathbb{R}^{N \times N}$ by solving the problem:
|
| 35 |
+
|
| 36 |
+
$$
|
| 37 |
+
\min _ {\mathbf {C} \in \mathbb {R} ^ {N \times N}} \left\{F (\mathbf {Z}, \mathbf {C}) \equiv \frac {1}{2} \| \mathbf {Z} \mathbf {C} - \mathbf {Z} \| _ {F} ^ {2} + \lambda \theta (\mathbf {C}) = \frac {1}{2} \langle \mathbf {Z} ^ {\top} \mathbf {Z}, (\mathbf {C} - \mathbf {I}) (\mathbf {C} - \mathbf {I}) ^ {\top} \rangle + \lambda \theta (\mathbf {C}) \right\}. \tag {1}
|
| 38 |
+
$$
|
| 39 |
+
|
| 40 |
+
Here, the first term $\| \mathbf{Z}\mathbf{C} - \mathbf{Z}\| _F^2$ captures the self-expressive property by requiring every datapoint to represent itself as an approximate linear combination of other points, i.e., $\mathbf{Z}_i\approx \mathbf{Z}\mathbf{C}_i$ where $\mathbf{Z}_i$ and $\mathbf{C}_i$ are the $i^{\mathrm{th}}$ columns of $\mathbf{Z}$ and $\mathbf{C}$ , respectively. The second term, $\theta (\mathbf{C})$ , is some regularization function designed to encourage each data point to only select other points within the
|
| 41 |
+
|
| 42 |
+
correct subspace in its representation and to avoid trivial solutions (such as $\mathbf{C} = \mathbf{I}$ ). Once the $\mathbf{C}$ matrix has been solved for, one can then define a graph affinity between data points, typically based on the magnitudes of the entries of $\mathbf{C}$ , and use an appropriate graph-based clustering method (e.g., spectral clustering (von Luxburg, 2007)) to produce the final clustering of the data points.
|
| 43 |
+
|
| 44 |
+
One of the first methods to utilize this approach was Sparse Subspace Clustering (SSC) (Elhamifar & Vidal, 2009; 2013), where $\theta$ takes the form $\theta_{SSC}(\mathbf{C}) = \| \mathbf{C}\|_1 + \delta(\mathrm{diag}(\mathbf{C}) = \mathbf{0})$ , with $\|\cdot\|_1$ denoting the $\ell_1$ norm and $\delta$ an indicator function which takes value $\infty$ if an element of the diagonal of $\mathbf{C}$ is non-zero and 0 otherwise. By regularizing $\mathbf{C}$ to be sparse, a point must represent itself using the smallest number of other points within the dataset, which in turn ideally requires a point to only select other points within its own subspace in the representation. Likewise other variants, with Low-Rank Representation (LRR) (Liu et al., 2013), Low-Rank Subspace Clustering (LRSC) (Vidal & Favaro, 2014) and Elastic-net Subspace Clustering (EnSC) (You et al., 2016) being well-known examples, take the same form as (1) with different choices of regularization. For example, $\theta_{LRR}(\mathbf{C}) = \| \mathbf{C}\|_*$ and $\theta_{EnSC}(\mathbf{C}) = \| \mathbf{C}\|_1 + \tau \| \mathbf{C}\|_F^2 + \delta(\mathrm{diag}(\mathbf{C}) = \mathbf{0})$ , where $\|\cdot\|_*$ denotes the nuclear norm (sum of the singular values). 
A significant advantage of the majority of these methods is that it can be proven (typically subject to some technical assumptions regarding the angles between the underlying subspaces and the distribution of the sampled data points within the subspaces) that the optimal $\mathbf{C}$ matrix in (1) will be "correct" in the sense that if $\mathbf{C}_{i,j}$ is non-zero then the $i^{\text{th}}$ and $j^{\text{th}}$ columns of $\mathbf{Z}$ lie in the same linear subspace (Soltanolkotabi & Candès, 2012; Lu et al., 2012; Elhamifar & Vidal, 2013; Soltanolkotabi et al., 2014; Wang et al., 2015; Wang & Xu, 2016; You & Vidal, 2015a;b; Yang et al., 2016; Tsakiris & Vidal, 2018; Li et al., 2018; You et al., 2019; Robinson et al., 2019), which has led to these methods achieving state-of-the-art performance in many applications.
|
| 45 |
+
|
| 46 |
+
# 1.1 SELF-EXPRESSIVE DEEP SUBSPACE CLUSTERING
|
| 47 |
+
|
| 48 |
+
Although subspace clustering techniques based on self-expression display strong empirical performance and provide theoretical guarantees, a significant limitation of these techniques is the requirement that the underlying dataset needs to be approximately supported on a union of linear subspaces. This has led to a strong motivation to extend these techniques to more general datasets, such as data supported on a union of non-linear low-dimensional manifolds. From inspection of the right side of (1), one can observe that the only dependence on the data $\mathbf{Z}$ comes in the form of the Gram matrix $\mathbf{Z}^{\top}\mathbf{Z}$ . As a result, self-expressive subspace clustering techniques are amenable to the "kernel-trick", where instead of taking an inner product kernel between data points, one can instead use a general kernel $\kappa (\cdot ,\cdot)$ (Patel & Vidal, 2014). Of course, such an approach comes with the traditional challenge of how to select an appropriate kernel so that the embedding of the data in the Hilbert space associated with the choice of kernel results in a union of linear subspaces.
|
| 49 |
+
|
| 50 |
+
The first approach to propose learning an appropriate embedding of an initial dataset $\mathbf{X} \in \mathbb{R}^{d_x \times N}$ (which does not necessarily have a union of subspaces structure) was given by Patel et al. (2013; 2015) who proposed first projecting the data into a lower dimensional space via a learned linear projector, $\mathbf{Z} = \mathbf{P}_l\mathbf{X}$ , where $\mathbf{P}_l \in \mathbb{R}^{d \times d_x}$ ( $d < d_x$ ) is also optimized over in addition to $\mathbf{C}$ in (1). To ensure that sufficient information about the original data $\mathbf{X}$ is preserved in the low-dimensional embedding $\mathbf{Z}$ , the authors further required that the linear projector satisfy the constraint that $\mathbf{P}_l\mathbf{P}_l^\top = \mathbf{I}$ and added an additional term to the objective with form $\| \mathbf{X} - \mathbf{P}_l^\top\mathbf{P}_l\mathbf{X}\|_F^2$ . However, since the projector is linear, the approach is not well suited for nonlinear manifolds, unless it is augmented with a kernel embedding, which again requires choosing a suitable kernel.
|
| 51 |
+
|
| 52 |
+
More recently, given the success of deep neural networks, a large number of studies Peng et al. (2017); Ji et al. (2017); Zeng et al. (2019b;a); Xie et al. (2020); Sun et al. (2019); Li et al. (2019); Yang et al. (2019); Jiang et al. (2019); Tang et al. (2018); Kheirandishfard et al. (2020b); Zhou et al. (2019); Jiang et al. (2018); Abavisani & Patel (2018); Zhou et al. (2018); Zhang et al. (2018; 2019b;a); Kheirandishfard et al. (2020a) have attempted to learn an appropriate embedding of the data (which ideally would have a union of linear subspaces structure) via a neural network, $\Phi_E(\mathbf{X},\mathcal{W}_e)$ , where $\mathcal{W}_e$ denotes the parameters of a network mapping defined by $\Phi_E$ , which takes a dataset $\mathbf{X}\in \mathbb{R}^{d_x\times N}$ as input. In an attempt to encourage the embedding of the data, $\Phi_E(\mathbf{X},\mathcal{W}_e)$ , to have this union of subspaces structure, these approaches minimize a self-expressive loss term, with form given in (1), on the embedded data, and a large majority of these proposed techniques can be
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
Figure 1: Illustration of our theoretical results. The goal of the SEDSC model is to train a network (typically an auto-encoder) to map data from a union of non-linear manifolds (Left) to a union of linear subspaces (Center). However, we show that for many of the formulations that have been proposed in the literature the global optimum of the model will have a degenerate geometry in the embedded space. For example, in the Dataset and Channel/Batch normalization schemes Theorem 2 shows that the globally optimal geometry will have all points clustered near the origin with the exception of two points, which will be copies of each other (to within a sign-flip) (Right).
|
| 56 |
+
|
| 57 |
+
described by the general form:
|
| 58 |
+
|
| 59 |
+
$$
|
| 60 |
+
\min_{\mathcal{W}_e, \mathbf{C}} \gamma F(\mathbf{Z}, \mathbf{C}) + g(\mathbf{Z}, \mathbf{X}, \mathbf{C}) \quad \text{subject to} \quad \mathbf{Z} = \Phi_E(\mathbf{X}, \mathcal{W}_e) \tag{2}
|
| 61 |
+
$$
|
| 62 |
+
|
| 63 |
+
where $g$ is some function designed to discourage trivial solutions (for example $\Phi_E(\mathbf{X},\mathcal{W}_e) = \mathbf{0}$ ) and $\gamma > 0$ is some hyper-parameter to balance the terms.
|
| 64 |
+
|
| 65 |
+
Several different choices of $g$ have been proposed in the literature. The first is to place some form of normalization directly on $\mathbf{Z}$ . For example, Peng et al. (2017) propose an Instance Normalization regularization, $g(\mathbf{Z},\mathbf{X},\mathbf{C}) = \sum_{i = 1}^{N}(\mathbf{Z}_i^\top \mathbf{Z}_i - 1)^2$ , which attempts to constrain the norm of the embedded data points to be 1. Likewise, one could also consider Dataset Normalization schemes, which bound the norm of the entire embedded representation $\| \mathbf{Z}\| _F^2\geq \tau$ or Channel/Batch Normalization schemes, which bound the norm of a channel of the embedded representation (i.e., a row of $\mathbf{Z}$ ), $\| \mathbf{Z}^i\| _F^2\geq \tau$ , $\forall i$ . We note that this is quite similar to the common Batch Norm operator (Ioffe & Szegedy, 2015) used in neural network training which attempts to constrain each row of $\mathbf{Z}$ to have zero mean and constant norm.
|
| 66 |
+
|
| 67 |
+
Another popular form of $g$ is to also train a decoding network $\Phi_{D}(\cdot, \mathcal{W}_{d})$ with parameters $\mathcal{W}_{d}$ to map the self-expressive representation, $\Phi_{E}(\mathbf{X}, \mathcal{W}_{e})\mathbf{C}$ , back to the original data to ensure that sufficient information is preserved in the self-expressive representation to recover the original data. We will refer to this as Autoencoder Regularization. This idea is essentially a generalization of the previously discussed work, which considered constrained linear encoder/decoder mappings (Patel et al., 2013; Patel & Vidal, 2014; Patel et al., 2015), to non-linear autoencoders neural networks and was first proposed by the authors of Ji et al. (2017). The problem takes the general form:
|
| 68 |
+
|
| 69 |
+
$$
|
| 70 |
+
\min_{\mathcal{W}_e, \mathcal{W}_d, \mathbf{C}} \gamma F(\mathbf{Z}, \mathbf{C}) + \ell(\mathbf{X}, \Phi_D(\mathbf{Z}\mathbf{C}, \mathcal{W}_d)) \quad \text{subject to} \quad \mathbf{Z} = \Phi_E(\mathbf{X}, \mathcal{W}_e), \tag{3}
|
| 71 |
+
$$
|
| 72 |
+
|
| 73 |
+
where the first term is the standard self-expressive subspace clustering loss applied to the embedded representation, and the second term is a standard auto-encoder loss, with $\ell$ typically chosen to be the squared loss. Note that here both the encoding/decoding network and the optimal self-expression encoding, $\mathbf{C}$ , are trained jointly, and once problem (3) is solved one can use the recovered $\mathbf{C}$ matrix directly for clustering.
|
| 74 |
+
|
| 75 |
+
Using the general formulation in (2) and the popular specific case in (3), Self-Expressive Deep Subspace Clustering (SEDSC) has been applied to a variety of applications, but there is relatively little that is known about it from a theoretical standpoint. Initial formulations for SEDSC were guided by the intuition that if the dataset is drawn from a union of linear subspaces, then solving problem (1) is known to induce desirable properties in $\mathbf{C}$ for clustering. By extension one might assume that if one also optimizes over the geometry of the learned embedding $(\mathbf{Z})$ this objective might induce a desirable geometry in the embedded space (e.g., a union of linear subspaces). However, a vast majority of the prior theoretical analysis for problems of the form in (1) only considers the case where the data is held fixed and analyzes the properties of the optimal $\mathbf{C}$ matrix. Due to the well-known fact that neural networks are capable of producing highly-expressive mapping functions (and hence a network could produce many potential values for $\mathbf{Z}$ ), the use of a model such as (2)/(3) is essentially using (1) as a regularization function on $\mathbf{Z}$ to encourage a union of subspaces geometry. To
|
| 76 |
+
|
| 77 |
+
date, however, models such as (2)/(3) have been guided largely by intuition and significant questions remain regarding what type of data geometry is encouraged by $F(\mathbf{Z}, \mathbf{C})$ when one optimizes over both the encoding matrix, $\mathbf{C}$ , and the network producing the embedded data representation, $\mathbf{Z}$ .
|
| 78 |
+
|
| 79 |
+
# 1.2 PAPER CONTRIBUTIONS
|
| 80 |
+
|
| 81 |
+
Here we explore these questions via theoretical analysis where we show that the use of $F(\mathbf{Z}, \mathbf{C})$ as a regularization function when learning a kernel from the data in an attempt to promote beneficial data geometries in the embedded space, as is done in (2), is largely insufficient in the sense that the optimal data geometries are trivial and not conducive to successful clustering in most cases. Specifically, we first note a basic fact that the Autoencoder Regularization formulation in (3) is typically ill-posed for most commonly used networks if constraints are not placed on the magnitude of the embedded data, $\Phi_E(\mathbf{X}, \mathcal{W}_e)$ , either through regularization/constraints on the autoencoder weights/architecture or an explicit normalization of $\Phi_E(\mathbf{X}, \mathcal{W}_e)$ , such as in the Instance/Batch/Dataset Normalization schemes. Then, even assuming that the embedded representation has been suitably normalized, we show that the optimal embedded data geometry encouraged by $F(\mathbf{Z}, \mathbf{C})$ is trivial in various ways, which will depend on how the data is normalized. We illustrate our theoretical predictions with experiments on both real and synthetic data. Further, we show experimentally that much of the claimed performance benefit of the SEDSC model reported in previous work can be attributed to an ad-hoc post-processing of the C matrix first proposed in Ji et al. (2017).
|
| 82 |
+
|
| 83 |
+
Notation. We will denote matrices with capital boldfaced letters, $\mathbf{Z}$ , vectors which are not rows/columns of a larger matrix with lower-case boldfaced letters, $\mathbf{z}$ , and sets with calligraphic letters, $\mathcal{Z}$ . The $i^{\mathrm{th}}$ row of a matrix will be denoted with a superscript, $\mathbf{Z}^i$ ; the $i^{\mathrm{th}}$ column of a matrix will be denoted with a subscript, $\mathbf{Z}_i$ ; the $(i,j)^{\mathrm{th}}$ entry of a matrix will be denoted as $\mathbf{Z}_{i,j}$ ; and the $i^{\mathrm{th}}$ entry of a vector will be denoted as $\mathbf{z}_i$ . We will denote the minimum singular value of a matrix $\mathbf{Z}$ as $\sigma_{\min}(\mathbf{Z})$ , and we will denote the nuclear, Frobenius, $\ell_p$ , and Schatten- $p$ norms for a matrix/vector as $\| \mathbf{Z}\|_*$ , $\| \mathbf{Z}\|_F$ , $\| \mathbf{Z}\|_p$ , and $\| \mathbf{Z}\|_{S_p}$ respectively. $\delta(cnd)$ denotes an indicator function with value 0 if condition $cnd$ is true and $\infty$ otherwise.
|
| 84 |
+
|
| 85 |
+
# 2 THEORETICAL ANALYSIS
|
| 86 |
+
|
| 87 |
+
# 2.1 BASIC SCALING ISSUES
|
| 88 |
+
|
| 89 |
+
We begin our analysis of the SEDSC model by considering the most popular formulation, which is to employ Autoencoder Regularization as in (3). Specifically, we note that without any regularization on the autoencoder network parameters $(\mathcal{W}_e,\mathcal{W}_d)$ or any normalization placed on the embedded representation, $\Phi_E(\mathbf{X},\mathcal{W}_e)$ , the formulation in (3) is often ill-posed in the sense that the value of $F(\Phi_E(\mathbf{X},\mathcal{W}_e),\mathbf{C})$ can be made arbitrarily small without changing the value of the autoencoder loss by simply scaling-down the weights in the encoding network and scaling-up the weights in the decoding network in a way which doesn't change the output of the autoencoder but reduces the magnitude of the embedded representation. As we will see, a sufficient condition for this to be possible is when the non-linearity in the final layer of the encoder is positively-homogeneous<sup>2</sup>. We further note that most commonly used non-linearities in neural networks are positively homogeneous, with Rectified Linear Units (ReLUs), leaky ReLUs, and max-pooling being common examples. As a result, most autoencoder architectures employed for SEDSC will require some form of regularization or normalization for the $F$ term to have any impact on the geometry of the embedded representation (other than trivially becoming smaller in magnitude), though many proposed formulations do not take this issue into account.
|
| 90 |
+
|
| 91 |
+
As a basic example, consider fully-connected single-hidden-layer networks for the encoder/decoder, $\Phi_E(\mathbf{X},\mathcal{W}_e) = \mathbf{W}_e^2 (\mathbf{W}_e^1\mathbf{X})_+$ and $\Phi_D(\mathbf{Z},\mathcal{W}_d) = \mathbf{W}_d^2 (\mathbf{W}_d^1\mathbf{Z})_+$ , where $(x)_{+} = \max \{x,0\}$ denotes the Rectified Linear Unit (ReLU) non-linearity applied entry-wise. Then, note that because the ReLU function is positively homogeneous of degree 1 for any $\alpha \geq 0$ one has $\Phi_E(\mathbf{X},\alpha \mathcal{W}_e) = (\alpha \mathbf{W}_e^2)(\alpha \mathbf{W}_e^1\mathbf{X})_+ = \alpha^2\mathbf{W}_e^2 (\mathbf{W}_e^1\mathbf{X})_+ = \alpha^2\Phi_E(\mathbf{X},\mathcal{W}_e)$ and similarly for $\Phi_D$ . As a result one can scale $\mathcal{W}_e$ by any $\alpha >0$ and $\mathcal{W}_d$ by $\alpha^{-1}$ without changing the output of
|
| 92 |
+
|
| 93 |
+
the autoencoder, $\Phi_D(\Phi_E(\mathbf{X},\alpha \mathcal{W}_e),\alpha^{-1}\mathcal{W}_d) = \Phi_D(\Phi_E(\mathbf{X},\mathcal{W}_e),\mathcal{W}_d)$ , but $\| \Phi_E(\mathbf{X},\alpha \mathcal{W}_e)\| _F = \alpha^2\| \Phi_E(\mathbf{X},\mathcal{W}_e)\| _F$ . From this, taking $\alpha$ to be arbitrarily small the magnitude of the embedding can also be made arbitrarily small, so if $\theta (\mathbf{C})$ in (1) is something like a norm (as is typically the case) the value of $F(\mathbf{Z},\mathbf{C})$ in (3) can also be made arbitrarily small without changing the reconstruction loss of the autoencoder $\ell$ . This implies one is simply training an autoencoder without any contribution from the $F(\mathbf{Z},\mathbf{C})$ term (other than trying to reduce the magnitude of the embedded representation). This basic idea can be easily generalized and formalized in the following statement (all proofs are provided in the Supplementary Material):
|
| 94 |
+
|
| 95 |
+
Proposition 1. Consider the objective in (3) and suppose $\theta(\mathbf{C})$ in (1) is a function which achieves its minimum at $\mathbf{C} = \mathbf{0}$ and satisfies $\theta(\mu \mathbf{C}) < \theta(\mathbf{C}), \forall \mu \in (0,1)$ . If for any choice of $(\mathcal{W}_d, \mathcal{W}_e)$ and $\tau_1, \tau_2 \in (0,1)$ there exists $(\hat{\mathcal{W}}_d, \hat{\mathcal{W}}_e)$ such that $\Phi_E(\mathbf{X}, \hat{\mathcal{W}}_e) = \tau_1 \Phi_E(\mathbf{X}, \mathcal{W}_e)$ and $\Phi_D(\tau_2 \mathbf{Z}, \hat{\mathcal{W}}_d) = \Phi_D(\mathbf{Z}, \mathcal{W}_d)$ then the $F$ term in (3) can be made arbitrarily small without changing the value of the loss function $\ell$ . Further, if the final layer of the encoding network is positively-homogeneous with degree $p \neq 0$ , such a $(\hat{\mathcal{W}}_d, \hat{\mathcal{W}}_e)$ will always exist simply by scaling the network weights of the linear (or affine) operator parameters.
|
| 96 |
+
|
| 97 |
+
In practice, most of the previously cited studies employing the SEDSC model use networks which satisfy the conditions of Proposition 1 without regularization, and we note that such models can never be solved to global optimality (only approach it asymptotically) and are inherently ill-posed. From this, one can conclude that Autoencoder Regularization by itself is often insufficient to prevent trivial solutions to (2). This specific issue can be easily fixed (although prior work often does not) if one ensures that the magnitude of the embedded representation is constrained to be larger than some minimum value - either through regularization/constraints placed on the network weights, such as using weight decay or coupling weights between the encoder and decoder, potentially a different choice of non-linearity on the last layer or the encoder, or explicit normalization of the embedded representation. However, the question remains what geometries are promoted by (1) even if the basic issue described above is corrected, which we explore next.
|
| 98 |
+
|
| 99 |
+
# 2.2 THE EFFECT OF THE AUTOENCODER LOSS
|
| 100 |
+
|
| 101 |
+
Before presenting the remainder of our formal results, we first pause to discuss the effect of the autoencoder loss $(\ell)$ in (3). The use of the autoencoder loss is designed to ensure that the embedded representation $\mathbf{Z}$ retains sufficient information about the original data $\mathbf{X}$ so that the data can be reconstructed by the decoder, but we note that this does not necessarily impose significant constraints on the geometric arrangement of the embedded points in $\mathbf{Z}$ . While there is potentially some constraint on the possible geometries of $\mathbf{Z}$ which is imposed by the choice of encoder/decoder architectures, reasonably expressive encoders and decoders can map a finite number of data points arbitrarily into the embedded space and still decode them accurately, provided the embedding of any two points is still distinct to within some $\epsilon$ perturbation (i.e., $\Phi_E$ is a one-to-one mapping on the points in $\mathbf{X}$ ). Further, because we only evaluate the mapping on a finite set of points $\mathbf{X}$ , this is a much easier (and can be achieved with much simpler networks) than the well-known universal approximation regime of neural networks (which requires good approximation over the entire continuous data domain), and it is well documented that typical network architectures can optimally fit fairly arbitrary finite training data (Zhang et al., 2017).
|
| 102 |
+
|
| 103 |
+
Given the above discussion, in our analysis we will first focus on the setting where the autoencoder is highly expressive. In this regime, the encoder can select weight parameters to produce an essentially arbitrary choice of $\mathbf{Z}$ embedding, and as long as $\Phi_E(\mathbf{X}_i)\neq \Phi_E(\mathbf{X}_j),\forall \mathbf{X}_i\neq \mathbf{X}_j$ then the decoder can exactly recover the original data $\mathbf{X}$ . As a result, for almost any choice of encoder the autoencoder loss term, $\ell$ , in (3) can be exactly minimized, so the optimal network weights for the model in (3) (and likewise (2)) will be those which minimize $F(\mathbf{Z},\mathbf{C})$ (potentially to within some small $\epsilon$ perturbation so that $\Phi_E(\mathbf{X})$ is a one-to-one mapping on the points in $\mathbf{X}$ ). As we already know from Prop 1, this is ill-posed without some additional form of regularization, so in the following subsections we explore optimal solutions to $F(\mathbf{Z},\mathbf{C})$ when one optimizes over both $\mathbf{C}$ and the embedded representation $\mathbf{Z} = \Phi_{E}(\mathbf{X},\mathcal{W}_{e})$ subject to three different constraints on $\mathbf{Z}$ : (1) Dataset Normalization $\| \mathbf{Z}\| _F^2\geq \tau$ , (2) Channel/Batch Normalization $\| \mathbf{Z}^i\| _F\geq \tau \forall i$ , and (3) Instance Normalization $\| \mathbf{Z}_i\| _F = \tau \forall i$ . Finally, after characterizing solutions to $F(\mathbf{Z},\mathbf{C})$ , we give a specific example of a very simple (i.e., not highly expressive) family of architectures which can achieve these solutions,
|
| 104 |
+
|
| 105 |
+
showing that the assumption of highly expressive networks is not necessary for these solutions to be globally optimal.
|
| 106 |
+
|
| 107 |
+
# 2.3 DATASET AND BATCH/CHANNEL NORMALIZATION
|
| 108 |
+
|
| 109 |
+
We will first consider the Dataset and Batch/Channel Normalization schemes, which will both result in very similar optimal solutions for the embedded data geometry. Recall, that this considers when the entire dataset is constrained to have a norm greater than some minimum value in the embedded space (Dataset Normalization) or when the norms of the rows of the dataset are constrained to have a norm greater than some minimum value (Batch/Channel Normalization). We note that the latter case is very closely related to batch normalization (Ioffe & Szegedy, 2015), which requires that each channel/feature (in this case a row of the embedded representation) to have zero mean and constant norm in expectation over the draw of a mini-batch. Additionally, while we do not explicitly enforce a zero-mean constraint, we will see that optimal solutions will exist which have zero mean. Now, if one optimizes $F(\mathbf{Z}, \mathbf{C})$ jointly over $(\mathbf{Z}, \mathbf{C})$ subject to the above constraint(s) on $\mathbf{Z}$ , then the following holds:
|
| 110 |
+
|
| 111 |
+
Theorem 1. Consider the following optimization problems which jointly optimize over $\mathbf{Z}$ and $\mathbf{C}$ :
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
(\text{P1}) \quad \min_{\mathbf{C}, \mathbf{Z}} \left\{ F(\mathbf{Z}, \mathbf{C}) \ \text{s.t.} \ \|\mathbf{Z}\|_F^2 \geq \tau \right\} \qquad (\text{P2}) \quad \min_{\mathbf{C}, \mathbf{Z}} \left\{ F(\mathbf{Z}, \mathbf{C}) \ \text{s.t.} \ \|\mathbf{Z}^i\|_F^2 \geq \frac{\tau}{d} \ \forall i \right\} \tag{4}
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
Then optimal values for $\mathbf{C}$ for both (P1) and (P2) are given by
|
| 118 |
+
|
| 119 |
+
$$
|
| 120 |
+
\mathbf {C} ^ {*} \in \underset {\mathbf {C}} {\arg \min } \frac {1}{2} \sigma_ {\min } ^ {2} (\mathbf {C} - \mathbf {I}) \tau + \lambda \theta (\mathbf {C}). \tag {5}
|
| 121 |
+
$$
|
| 122 |
+
|
| 123 |
+
Moreover, for any optimal $\mathbf{C}^*$ , let $r$ be the multiplicity of the smallest singular value of $\mathbf{C}^* - \mathbf{I}$ and let $\mathbf{Q} \in \mathbb{R}^{N \times r}$ be an orthonormal basis for the subspace spanned by the left singular vectors associated with the smallest singular value of $\mathbf{C}^* - \mathbf{I}$ . Then we have that optimal values for $\mathbf{Z}$ are given by:
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
\mathbf{Z}^* \in \left\{ \mathbf{B}\mathbf{Q}^\top : \mathbf{B} \in \mathbb{R}^{d \times r} \cap \mathcal{B} \right\}, \quad \mathcal{B} = \begin{cases} \left\{ \mathbf{B} : \|\mathbf{B}\|_F^2 = \tau \right\} & \sigma_{\min}(\mathbf{C}^* - \mathbf{I}) > 0 \quad (\text{P1}) \\ \left\{ \mathbf{B} : \|\mathbf{B}\|_F^2 \geq \tau \right\} & \sigma_{\min}(\mathbf{C}^* - \mathbf{I}) = 0 \quad (\text{P1}) \\ \left\{ \mathbf{B} : \|\mathbf{B}^i\|_F^2 = \frac{\tau}{d}, \ \forall i \right\} & \sigma_{\min}(\mathbf{C}^* - \mathbf{I}) > 0 \quad (\text{P2}) \\ \left\{ \mathbf{B} : \|\mathbf{B}^i\|_F^2 \geq \frac{\tau}{d}, \ \forall i \right\} & \sigma_{\min}(\mathbf{C}^* - \mathbf{I}) = 0 \quad (\text{P2}) \end{cases} \tag{6}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
From the above result, one notes from (5) that optimizing $F(\mathbf{Z}, \mathbf{C})$ jointly over both $\mathbf{Z}$ and $\mathbf{C}$ is equivalent to finding a $\mathbf{C}$ which minimizes a trade-off between the minimum singular value of $\mathbf{C} - \mathbf{I}$ and the regularization $\theta(\mathbf{C})$ . Further, we note that if such an optimal $\mathbf{C}$ results in the minimum singular value of $\mathbf{C} - \mathbf{I}$ having a multiplicity of 1, then this implies that every data point in $\mathbf{Z}$ will simply be a scaled version of the same point. Obviously, such an embedding is not useful for subspace clustering. Characterizing optimal solutions to (5) is somewhat complicated in the general case due to the fact that the smallest singular value is a concave function of a matrix and $\theta$ is typically chosen to be a convex regularization function, resulting in the minimization of a convex+concave function. Instead, we will focus on the most commonly used choices of regularization, starting with $\theta_{SSC}(\mathbf{C}) = \| \mathbf{C}\|_1 + \delta (\mathrm{diag}(\mathbf{C}) = \mathbf{0})$ , where we derive the optimal solution in the case where $\sigma_{\min}(\mathbf{C} - \mathbf{I}) = 0$ . We note that this corresponds to the case where $\mathbf{Z}^* = \mathbf{Z}^*\mathbf{C}^*$ which one typically obtains as $\lambda$ in (1) is taken to be small.
|
| 130 |
+
|
| 131 |
+
Theorem 2. Optimal solutions to the problems
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\left(\mathrm {P1}\right) \quad \min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \ \text {s.t.} \ \operatorname {diag} (\mathbf {C}) = \mathbf {0}, \ \mathbf {Z} = \mathbf {Z} \mathbf {C}, \ \| \mathbf {Z} \| _ {F} ^ {2} \geq \tau \tag {7}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\left(\mathrm {P2}\right) \quad \min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \ \text {s.t.} \ \operatorname {diag} (\mathbf {C}) = \mathbf {0}, \ \mathbf {Z} = \mathbf {Z} \mathbf {C}, \ \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d} \ \forall i \tag {8}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
are characterized by the set
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\left(\mathbf {Z} ^ {*}, \mathbf {C} ^ {*}\right) \in \left\{\left[ \begin{array}{l l l} \mathbf {z} & \mathbf {z} & \mathbf {0} _ {d \times (N - 2)} \end{array} \right] \mathbf {P} \right\} \times \left\{\mathbf {P} ^ {\top} \left[ \begin{array}{c c c c c} 0 & 1 & 0 & \dots & 0 \\ 1 & 0 & 0 & \dots & 0 \\ 0 & 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \\ 0 & 0 & 0 & & 0 \end{array} \right] \mathbf {P} \right\}, \tag {9}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
where $\mathbf{P} \in \mathbb{R}^{N \times N}$ is an arbitrary signed-permutation matrix and $\mathbf{z} \in \mathbb{R}^d$ is an arbitrary vector which satisfies $\| \mathbf{z} \|_F^2 \geq \tau / 2$ for (P1) and $\mathbf{z}_i^2 \geq \tau / (2d), \forall i \in [1, d]$ for (P2).
|
| 148 |
+
|
| 149 |
+
From the above result, we have shown that if $\mathbf{Z}$ is normalized to have a lower-bounded norm on either the entire embedded representation or for each row, then the effect of the $F(\mathbf{Z},\mathbf{C})$ loss will be largely similar to the situation described by Proposition 1 in the sense that the loss will still attempt to push all of the points to 0 with the exception of two points, which will be copies of each other (potentially to within a sign-flip). Again, the optimal embedded representation is clearly ill-posed for clustering since all but two of the points are trivially driven towards 0 in the embedded space.
|
| 150 |
+
|
| 151 |
+
In addition, we also present a result similar to Theorem 2 when $\mathbf{C}$ is regularized by any Schatten- $p$ norm, which includes two other popular choices of regularization that have appeared in the literature - the Frobenius norm $\theta(\mathbf{C}) = \| \mathbf{C} \|_F$ (for $p = 2$ ) and the nuclear norm $\theta(\mathbf{C}) = \| \mathbf{C} \|_*$ (for $p = 1$ ) - as special cases.
|
| 152 |
+
|
| 153 |
+
Theorem 3. Optimal solutions to the problems
|
| 154 |
+
|
| 155 |
+
$$
|
| 156 |
+
\left(\mathrm {P1}\right) \quad \min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {\mathcal {S} _ {p}} \ \text {s.t.} \ \mathbf {Z} = \mathbf {Z} \mathbf {C}, \ \| \mathbf {Z} \| _ {F} ^ {2} \geq \tau \tag {10}
|
| 157 |
+
$$
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
\left(\mathrm {P2}\right) \quad \min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {\mathcal {S} _ {p}} \ \text {s.t.} \ \mathbf {Z} = \mathbf {Z} \mathbf {C}, \ \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d} \ \forall i, \tag {11}
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
where $\| \mathbf{C}\|_{S_p}$ is any Schatten- $p$ norm on $\mathbf{C}$ , are characterized by the set
|
| 164 |
+
|
| 165 |
+
$$
|
| 166 |
+
\left(\mathbf {Z} ^ {*}, \mathbf {C} ^ {*}\right) \in \left\{\left(\mathbf {z q} ^ {\top}\right) \times \left(\mathbf {q q} ^ {\top}\right): \mathbf {q} \in \mathbb {R} ^ {N}, \| \mathbf {q} \| _ {F} = 1 \right\} \tag {12}
|
| 167 |
+
$$
|
| 168 |
+
|
| 169 |
+
where $\mathbf{z} \in \mathbb{R}^d$ is an arbitrary vector which satisfies $\| \mathbf{z} \|_F^2 \geq \tau$ for (P1) and $\mathbf{z}_i^2 \geq \frac{\tau}{d}$ , $\forall i$ for (P2).
|
| 170 |
+
|
| 171 |
+
Again, note that this is obviously not a good geometry for successful spectral clustering as all the points in the dataset are simply arranged on a single line and the optimal $\mathbf{C}$ is a rank-one matrix.
|
| 172 |
+
|
| 173 |
+
# 2.4 INSTANCE NORMALIZATION
|
| 174 |
+
|
| 175 |
+
To explicitly prevent the case where most of the points in the embedded space are trivially driven to 0 as in the prior two normalization schemes, another potential normalization strategy which has been proposed for (2) is to use Instance Normalization (Peng et al., 2017), where the $\ell_2$ norm of each embedded data point is constrained to be equal to some constant. Here again we will see that this results in somewhat trivial data geometries. Specifically, we will again focus on the choice of the $\theta_{SSC}(\mathbf{C})$ regularization function when we have exact equality, $\mathbf{Z} = \mathbf{Z}\mathbf{C}$ , for simplicity of presentation. From this we have the following result:
|
| 176 |
+
|
| 177 |
+
Theorem 4. Optimal solutions to the problem
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \ \text {s.t.} \ \operatorname {diag} (\mathbf {C}) = \mathbf {0}, \quad \mathbf {Z} = \mathbf {Z} \mathbf {C}, \quad \| \mathbf {Z} _ {i} \| _ {F} ^ {2} = \tau \ \forall i \tag {13}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
must have the property that for any column in $\mathbf{Z}^*$ , $\mathbf{Z}_i^*$ , there exists another column, $\mathbf{Z}_j^* (i \neq j)$ , such that $\mathbf{Z}_i^* = s_{i,j}\mathbf{Z}_j^*$ where $s_{i,j} \in \{-1,1\}$ . Further, $\|\mathbf{C}_i^*\|_1 = 1$ $\forall i$ and $\mathbf{C}_{i,j} \neq 0 \Rightarrow \mathbf{Z}_i^* = \pm \mathbf{Z}_j^*$ .
|
| 184 |
+
|
| 185 |
+
The above result is quite intuitive in the sense that because a given point cannot use itself in its representation (due to the $\mathrm{diag}(\mathbf{C}) = \mathbf{0}$ constraint), the next best thing is to have an exact copy of itself in another column. While this result is more conducive to successful clustering in the sense that points which are close in the embedded space are encouraged to 'merge' into a single point, there are still numerous pathological geometries that can result. Specifically, there is no constraint on the number of 'distinct' points in the representation (i.e., the number of vectors which are not copies of each other to within a sign flip), other than it must be less than $N / 2$ . As a result, the optimal $\mathbf{C}^*$ matrix can also contain an arbitrary number (in the range $[1,N / 2]$ ) of connected components in the affinity graph, resulting in somewhat arbitrary spectral clustering.
|
| 186 |
+
|
| 187 |
+
Example of Degenerate Geometry with Simple Networks. In section 2.2 we discussed how if the encoder/decoder are highly expressive then the optimal embedding will approach the solutions we give in our theoretical analysis. Here we show that trivial embeddings can also occur with relatively simple encoders/decoders. Specifically, consider basic encoder/decoder architectures which consist of two affine mappings with a ReLU non-linearity (denoted as $(\cdot)_{+}$ ) on the hidden layer:
|
| 188 |
+
|
| 189 |
+
$$
|
| 190 |
+
\Phi_ {E} (\mathbf {x}, \mathcal {W} _ {e}) = \mathbf {W} _ {e} ^ {2} \left(\mathbf {W} _ {e} ^ {1} \mathbf {x} + \mathbf {b} _ {e} ^ {1}\right) _ {+} + \mathbf {b} _ {e} ^ {2} \quad \Phi_ {D} (\mathbf {z}, \mathcal {W} _ {d}) = \mathbf {W} _ {d} ^ {2} \left(\mathbf {W} _ {d} ^ {1} \mathbf {z} + \mathbf {b} _ {d} ^ {1}\right) _ {+} + \mathbf {b} _ {d} ^ {2} \tag {14}
|
| 191 |
+
$$
|
| 192 |
+
|
| 193 |
+
where the linear operators $(\mathbf{W}$ matrices) can optionally be constrained (for example for convolution operations $\mathbf{W}$ could be required to be a Toeplitz matrix). Now if we have that the embedded dimension $d$ is equal to the data dimension $d_{x}$ we will say that linear operators can express identity on $\mathbf{X}$
|
| 194 |
+
|
| 195 |
+

|
| 196 |
+
Figure 2: Clustering accuracy results for YaleB (38 faces), COIL100, and ORL datasets with (Dashed Lines) and without (Solid Lines) the post-processing step on $\mathbf{C}$ matrix proposed in Ji et al. (2017). (Raw Data) Clustering on the raw data. (Autoenc only) Clustering features from an autoencoder trained without the $F(\mathbf{Z},\mathbf{C})$ term. (Full SEDSC) The full model in (3).
|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
|
| 202 |
+
if there exists parameters $(\mathbf{W}^2, \mathbf{W}^1)$ such that $\mathbf{W}^2 \mathbf{W}^1 \mathbf{X} = \mathbf{X}$ . Note that if the architectures in (14) are fully-connected this implies that for a general $\mathbf{X}$ the number of hidden units is greater than or equal to $d = d_x$ (which can be even smaller if $\mathbf{X}$ is low-rank), while if the linear operators are convolutions, then we only need one convolutional channel (with the kernel being the delta function). Within this setting we have the following result:
|
| 203 |
+
|
| 204 |
+
Proposition 2. Consider encoder and decoder networks with the form given in (14). Then, given any dataset $\mathbf{X} \in \mathbb{R}^{d_x \times N}$ where the linear operators in both the encoder/decoder can express identity on $\mathbf{X}$ and any $\tau > 0$ there exist network parameters $(\mathcal{W}_e, \mathcal{W}_d)$ which satisfy the following:
|
| 205 |
+
|
| 206 |
+
1. Embedded points are arbitrarily close: $\| \Phi_E(\mathbf{X}_i,\mathcal{W}_e) - \Phi_E(\mathbf{X}_j,\mathcal{W}_e)\| \leq \epsilon \forall (i,j)$ and $\forall \epsilon >0$
|
| 207 |
+
2. Embedded points have norm arbitrarily close to $\tau$ : $\left| \|\Phi_{E}(\mathbf{X}_{i},\mathcal{W}_{e})\|_{F} - \tau \right| \leq \epsilon \ \forall i$ and $\forall \epsilon >0$
|
| 208 |
+
3. Embedded points can be decoded exactly: $\Phi_D(\Phi_E(\mathbf{X}_i, \mathcal{W}_e), \mathcal{W}_d) = \mathbf{X}_i$ , $\forall i$ .
|
| 209 |
+
|
| 210 |
+
From the above simple example, we can see that even with very simple network architectures (i.e., not necessarily highly expressive) it is still possible to have solutions which are arbitrarily close to the global optimum described in Theorem 4, in the sense that the points can be made to be arbitrarily close to each other in the embedded space with norm arbitrarily close to $\tau$ (for any arbitrary choice of $\tau$ ), while still having a perfect reconstruction of $\mathbf{X}$ .
|
| 211 |
+
|
| 212 |
+
# 3 EXPERIMENTS
|
| 213 |
+
|
| 214 |
+
Here, we present experiments on both real and synthetic data that verify our theoretical predictions experimentally. We first evaluate the Autoencoder Regularization form given in (3) by repeating all of the experiments from Ji et al. (2017). In the Supplementary Material we first show that the optimization problem never reaches a stationary point due to the pathology described by Prop 1 (see Figure 4 in Supplementary Material), and below we show that the improvement in performance reported in Ji et al. (2017) is largely attributable to an ad-hoc post-processing step. Then, we present experiments on a simple synthetic dataset to illustrate our theoretical results.
|
| 215 |
+
|
| 216 |
+
Repeating the Experiments of Ji et al. (2017). First we use the code provided by the authors of Ji et al. (2017) to repeat all of their original clustering experiments on the Extended Yale-B (38 faces), ORL, and COIL100 datasets. As baseline methods, we perform subspace clustering on the raw data as well as subspace clustering on embedded features obtained by training the autoencoder network without the $F(\mathbf{Z}, \mathbf{C})$ term (i.e., $\gamma = 0$ and $\mathbf{C}$ fixed at $\mathbf{I}$ in (3)). See Supplementary Material for further details.
|
| 217 |
+
|
| 218 |
+
In addition to proposing the model in (3), the authors of Ji et al. (2017) also implement a somewhat arbitrary post-processing of the $\mathbf{C}$ matrix recovered from the SEDSC model before the final spectral
|
| 219 |
+
|
| 220 |
+
Table 1: Clustering accuracy shown in Fig 2. To be consistent with Ji et al. (2017), we report the results at 1000/120/700 iterations for Yale B / COIL100 / ORL, respectively.
|
| 221 |
+
|
| 222 |
+
<table><tr><td></td><td colspan="3">With post-processing</td><td colspan="3">Without post-processing</td></tr><tr><td></td><td>Raw Data</td><td>Autoenc only</td><td>Full SEDSC</td><td>Raw Data</td><td>Autoenc only</td><td>Full SEDSC</td></tr><tr><td>YaleB</td><td>94.40%</td><td>97.12%</td><td>96.79%</td><td>68.71%</td><td>71.96%</td><td>59.09%</td></tr><tr><td>COIL100</td><td>66.47%</td><td>68.26%</td><td>64.96%</td><td>47.51%</td><td>44.84%</td><td>45.67%</td></tr><tr><td>ORL</td><td>78.12%</td><td>83.43%</td><td>84.10%</td><td>72.68%</td><td>73.73%</td><td>73.50%</td></tr></table>
|
| 223 |
+
|
| 224 |
+

|
| 225 |
+
Figure 3: Results for synthetic data using the dataset normalization scheme. (Left) Original data points (Blue) and the data points at the output of the autoencoder when the full model (3) is used (Red). (Center Left) Data representation in the embedded domain when just the autoencoder is trained without the $F(\mathbf{Z}, \mathbf{C})$ term (Blue) and the full SEDSC model is used (Red). (Center Right) The absolute value of the recovered C encoding matrix when trained with the full model. (Right) Same plot as the previous column but with a logarithmic color scale to visualize small entries.
|
| 226 |
+
|
| 227 |
+
clustering, which involves 1) entrywise hard thresholding, 2) applying the shape interaction matrix method Costeira & Kanade (1995) to $\mathbf{C}$ , and 3) raising $\mathbf{C}$ to a power, entry-wise. Likewise, many subsequent works on SEDSC follow Ji et al. (2017) and employ very similar post-processing steps on $\mathbf{C}$ . As shown in Figure 2 and Table 1 there is little observed benefit for using SEDSC, as it achieves comparable (or worse) performance than baseline methods in almost all settings when the post-processing of $\mathbf{C}$ is applied consistently (or not) across all methods.
|
| 228 |
+
|
| 229 |
+
Synthetic Data Experiments. Finally, to illustrate our theoretical predictions we construct a simple synthetic dataset which consists of 100 points drawn from the union of two parabolas in $\mathbb{R}^2$ , where the space of the embedding is also $\mathbb{R}^2$ . We then train the model given in (3) with $\theta(\mathbf{C}) = \theta_{SSC}(\mathbf{C}) = \| \mathbf{C}\|_1 + \delta(\mathrm{diag}(\mathbf{C}) = \mathbf{0})$ , the encoder/decoder networks being simple single-hidden-layer fully-connected networks with 100 hidden units, and ReLU activations on the hidden units. Figure 3 shows the solution obtained when we directly add a normalization operator to the encoder network which normalizes the output of the encoder to have unit Frobenius norm over the entire dataset (Dataset Normalization). Additional experiments for other normalization schemes and a description of the details of our experiments can be found in the Supplementary Material.
|
| 230 |
+
|
| 231 |
+
From Figure 3 one can see that our theoretical predictions are largely confirmed experimentally. Namely, one sees that when the full SEDSC model is trained the embedded representation is largely as predicted by Theorem 2, with almost all of the embedded points (Left Center - Red points) being close to the origin with the exception of two points, which are co-linear with each other. Likewise, the C matrix is dominated by two non-zero entries, with the remaining non-zero entries only appearing on the logarithmic color scale. We note that as this is a very simple dataset (i.e., two parabolas without any added noise) one would expect most reasonable manifold clustering/learning algorithms to succeed; however, due to the deficiencies of the SEDSC model we have shown in our analysis, a trivial solution results.
|
| 232 |
+
|
| 233 |
+
# 4 CONCLUSIONS
|
| 234 |
+
|
| 235 |
+
We have presented a theoretical and experimental analysis of the Self-Expressive Deep Subspace Clustering (SEDSC) model. We have shown that in many cases the SEDSC model is ill-posed and results in trivial data geometries in the embedded space. Further, our attempts to replicate previously reported experiments lead us to conclude that much of the claimed benefit of SEDSC is attributable to other factors such as post-processing of the recovered encoding matrix, C, and not the SEDSC model itself. Overall, we conclude that considerably more attention needs to be given to the issues we have raised in this paper in terms of both how models for this problem are designed and how they are evaluated to ensure that one arrives at meaningful solutions and can clearly demonstrate the performance of the resulting model without other confounding factors.
|
| 236 |
+
|
| 237 |
+
Acknowledgments. The authors thank Zhihui Zhu and Benjamin Béjar Haro for helpful discussions in the early stages of this work. This work was partially supported by the Northrop Grumman Mission Systems Research in Applications for Learning Machines (REALM) initiative, NSF Grants 1704458, 2031985 and 1934979, and the Tsinghua-Berkeley Shenzhen Institute Research Fund.
|
| 238 |
+
|
| 239 |
+
# REFERENCES
|
| 240 |
+
|
| 241 |
+
Mahdi Abavisani and Vishal M Patel. Deep multimodal subspace clustering networks. IEEE Journal of Selected Topics in Signal Processing, 12(6):1601-1614, 2018.
|
| 242 |
+
Joao Costeira and Takeo Kanade. A multi-body factorization method for motion analysis. In IEEE International Conference on Computer Vision, pp. 1071-1076. IEEE, 1995.
|
| 243 |
+
Ehsan Elhamifar and René Vidal. Sparse subspace clustering. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 2790-2797, 2009.
|
| 244 |
+
Ehsan Elhamifar and René Vidal. Sparse subspace clustering: Algorithm, theory, and applications. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(11):2765-2781, 2013.
|
| 245 |
+
Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International Conference on Machine Learning, pp. 448-456, 2015.
|
| 246 |
+
Pan Ji, Tong Zhang, Hongdong Li, Mathieu Salzmann, and Ian Reid. Deep subspace clustering networks. In Advances in Neural Information Processing Systems, pp. 24-33, 2017.
|
| 247 |
+
Yangbangyan Jiang, Zhiyong Yang, Qianqian Xu, Xiaochun Cao, and Qingming Huang. When to learn what: Deep cognitive subspace clustering. In Proceedings of the 26th ACM international conference on Multimedia, pp. 718-726, 2018.
|
| 248 |
+
Yangbangyan Jiang, Qianqian Xu, Zhiyong Yang, Xiaochun Cao, and Qingming Huang. Duet robust deep subspace clustering. In Proceedings of the 27th ACM International Conference on Multimedia, pp. 1596-1604, 2019.
|
| 249 |
+
Mohsen Kheirandishfard, Fariba Zohrizadeh, and Farhad Kamangar. Deep low-rank subspace clustering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pp. 864-865, 2020a.
|
| 250 |
+
Mohsen Kheirandishfard, Fariba Zohrizadeh, and Farhad Kamangar. Multi-level representation learning for deep subspace clustering. In The IEEE Winter Conference on Applications of Computer Vision, pp. 2039-2048, 2020b.
|
| 251 |
+
Chun-Guang Li, Chong You, and René Vidal. On geometric analysis of affine sparse subspace clustering. IEEE Journal on Selected Topics in Signal Processing, 12(6):1520-1533, 2018.
|
| 252 |
+
Ruihuang Li, Changqing Zhang, Huazhu Fu, Xi Peng, Tianyi Zhou, and Qinghua Hu. Reciprocal multi-layer subspace learning for multi-view clustering. In IEEE International Conference on Computer Vision, pp. 8172-8180, 2019.
|
| 253 |
+
Guangcan Liu, Zhouchen Lin, Shuicheng Yan, Ju Sun, and Yi Ma. Robust recovery of subspace structures by low-rank representation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(1):171-184, 2013.
|
| 254 |
+
Can-Yi Lu, Hai Min, Zhong-Qiu Zhao, Lin Zhu, De-Shuang Huang, and Shuicheng Yan. Robust and efficient subspace segmentation via least squares regression. In European Conference on Computer Vision, pp. 347-360, 2012.
|
| 255 |
+
N. Parikh and S. Boyd. Proximal Algorithms. Foundations and Trends in Optimization, 1(3):123-231, 2013.
|
| 256 |
+
V. M. Patel and R. Vidal. Kernel sparse subspace clustering. In IEEE International Conference on Image Processing, pp. 2849-2853, 2014.
|
| 257 |
+
V. M. Patel, H. V. Nguyen, and R. Vidal. Latent space sparse subspace clustering. In IEEE International Conference on Computer Vision, pp. 225-232, 2013.
|
| 258 |
+
V. M. Patel, H. V. Nguyen, and R. Vidal. Latent space sparse and low-rank subspace clustering. IEEE Journal of Selected Topics in Signal Processing, 9(4):691-701, 2015.
|
| 259 |
+
|
| 260 |
+
Xi Peng, Jiashi Feng, Shijie Xiao, Jiwen Lu, Zhang Yi, and Shuicheng Yan. Deep sparse subspace clustering. arXiv preprint arXiv:1709.08374, 2017.
|
| 261 |
+
Daniel P Robinson, Rene Vidal, and Chong You. Basis pursuit and orthogonal matching pursuit for subspace-preserving recovery: Theoretical analysis. arXiv preprint arXiv:1912.13091, 2019.
|
| 262 |
+
Mahdi Soltanolkotabi and Emmanuel J. Candès. A geometric analysis of subspace clustering with outliers. Annals of Statistics, 40(4):2195-2238, 2012.
|
| 263 |
+
Mahdi Soltanolkotabi, Ehsan Elhamifar, and Emmanuel J. Candès. Robust subspace clustering. Annals of Statistics, 42(2):669-699, 2014.
|
| 264 |
+
Xiukun Sun, Miaomiao Cheng, Chen Min, and Liping Jing. Self-supervised deep multi-view subspace clustering. In Asian Conference on Machine Learning, pp. 1001-1016, 2019.
|
| 265 |
+
Xiaoliang Tang, Xuan Tang, Wanli Wang, Li Fang, and Xian Wei. Deep multi-view sparse subspace clustering. In Proceedings of the 2018 VII International Conference on Network, Communication and Computing, pp. 115-119, 2018.
|
| 266 |
+
Manolis C. Tsakiris and René Vidal. Theoretical analysis of sparse subspace clustering with missing entries. In International Conference on Machine Learning, pp. 4975-4984, 2018.
|
| 267 |
+
Réné Vidal and Paolo Favaro. Low rank subspace clustering (LRSC). Pattern Recognition Letters, 43:47-61, 2014.
|
| 268 |
+
Réné Vidal, Yi Ma, and Shankar Sastry. Generalized Principal Component Analysis. Springer Verlag, 2016.
|
| 269 |
+
Ulrike von Luxburg. A tutorial on spectral clustering. Statistics and Computing, 17(4):395-416, 2007.
|
| 270 |
+
Yining Wang, Yu-Xiang Wang, and Aarti Singh. A deterministic analysis of noisy sparse subspace clustering for dimensionality-reduced data. In International Conference on Machine Learning, pp. 1422-1431, 2015.
|
| 271 |
+
Yu-Xiang Wang and Huan Xu. Noisy sparse subspace clustering. Journal of Machine Learning Research, 17(12):1-41, 2016.
|
| 272 |
+
Yuan Xie, Jinyan Liu, Yanyun Qu, Dacheng Tao, Wensheng Zhang, Longquan Dai, and Lizhuang Ma. Robust kernelized multiview self-representation for subspace clustering. IEEE Transactions on Neural Networks and Learning Systems, 2020.
|
| 273 |
+
Shuai Yang, Wenqi Zhu, and Yuesheng Zhu. Residual encoder-decoder network for deep subspace clustering. arXiv preprint arXiv:1910.05569, 2019.
|
| 274 |
+
Yingzhen Yang, Jiashi Feng, Nebojsa Jojic, Jianchao Yang, and Thomas S Huang. $\ell_0$ -sparse subspace clustering. In European Conference on Computer Vision, pp. 731-747, 2016.
|
| 275 |
+
C. You and R. Vidal. Subspace-sparse representation. *Arxiv*, abs/1507.01307, 2015a.
|
| 276 |
+
Chong You and René Vidal. Geometric conditions for subspace-sparse recovery. In International Conference on Machine Learning, pp. 1585-1593, 2015b.
|
| 277 |
+
Chong You, Chun-Guang Li, Daniel P. Robinson, and René Vidal. Oracle based active set algorithm for scalable elastic net subspace clustering. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 3928-3937, 2016.
|
| 278 |
+
Chong You, Chun-Guang Li, Daniel P. Robinson, and René Vidal. Is an affine constraint needed for affine subspace clustering? In IEEE International Conference on Computer Vision, 2019.
|
| 279 |
+
Meng Zeng, Yaoming Cai, Zhihua Cai, Xiaobo Liu, Peng Hu, and Junhua Ku. Unsupervised hyperspectral image band selection based on deep subspace clustering. IEEE Geoscience and Remote Sensing Letters, 16(12):1889-1893, 2019a.
|
| 280 |
+
|
| 281 |
+
Meng Zeng, Yaoming Cai, Xiaobo Liu, Zhihua Cai, and Xiang Li. Spectral-spatial clustering of hyperspectral image based on laplacian regularized deep subspace clustering. In IGARSS 2019-2019 IEEE International Geoscience and Remote Sensing Symposium, pp. 2694-2697. IEEE, 2019b.
|
| 282 |
+
C. Zhang, S. Bengio, M. Hardt, and B. Recht. Understanding deep learning requires rethinking generalization. In International Conference on Learning Representations, 2017.
|
| 283 |
+
Changqing Zhang, Huazhu Fu, Qinghua Hu, Xiaochun Cao, Yuan Xie, Dacheng Tao, and Dong Xu. Generalized latent multi-view subspace clustering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 42(1):86-99, 2018.
|
| 284 |
+
Junjian Zhang, Chun-Guang Li, Chong You, Xianbiao Qi, Honggang Zhang, Jun Guo, and Zhouchen Lin. Self-supervised convolutional subspace clustering network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5473-5482, 2019a.
|
| 285 |
+
Tong Zhang, Pan Ji, Mehrtash Harandi, Huang Wenbing, and Hongdong Li. Neural collaborative subspace clustering. In ICML, 2019b.
|
| 286 |
+
Lei Zhou, Bai Xiao, Xianglong Liu, Jun Zhou, Edwin R Hancock, et al. Latent distribution preserving deep subspace clustering. In 28th International Joint Conference on Artificial Intelligence. York, 2019.
|
| 287 |
+
Pan Zhou, Yunqing Hou, and Jiashi Feng. Deep adversarial subspace clustering. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 1596-1604, 2018.
|
| 288 |
+
|
| 289 |
+
# 5 PROOFS OF RESULTS IN MAIN PAPER
|
| 290 |
+
|
| 291 |
+
Here we present the proofs of our various results that we give in the main paper.
|
| 292 |
+
|
| 293 |
+
# 5.1 PROOF OF PROPOSITION 1
|
| 294 |
+
|
| 295 |
+
Proposition 1. Consider the objective in (3) and suppose $\theta(\mathbf{C})$ in (1) is a function which achieves its minimum at $\mathbf{C} = \mathbf{0}$ and satisfies $\theta(\mu \mathbf{C}) < \theta(\mathbf{C}), \forall \mu \in (0,1)$ . If for any choice of $(\mathcal{W}_d, \mathcal{W}_e)$ and $\tau_1, \tau_2 \in (0,1)$ there exists $(\hat{\mathcal{W}}_d, \hat{\mathcal{W}}_e)$ such that $\Phi_E(\mathbf{X}, \hat{\mathcal{W}}_e) = \tau_1 \Phi_E(\mathbf{X}, \mathcal{W}_e)$ and $\Phi_D(\tau_2 \mathbf{Z}, \hat{\mathcal{W}}_d) = \Phi_D(\mathbf{Z}, \mathcal{W}_d)$ then the $F$ term in (3) can be made arbitrarily small without changing the value of the loss function $\ell$ . Further, if the final layer of the encoding network is positively-homogeneous with degree $p \neq 0$ , such a $(\hat{\mathcal{W}}_d, \hat{\mathcal{W}}_e)$ will always exist simply by scaling the network weights of the linear (or affine) operator parameters.
|
| 296 |
+
|
| 297 |
+
Proof. Let $(\mathbf{C},\mathcal{W}_e,\mathcal{W}_d)$ be an arbitrary triplet. For any choice of $\tau, \mu \in (0,1)$ , note the statement conditions require that there exists $(\hat{\mathbf{C}},\hat{\mathcal{W}}_e,\hat{\mathcal{W}}_d)$ which satisfy
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\hat {\mathbf {C}} = \mu \mathbf {C}
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
$$
|
| 304 |
+
\Phi_ {E} (\mathbf {X}, \hat {\mathcal {W}} _ {e}) = \tau \Phi_ {E} (\mathbf {X}, \mathcal {W} _ {e}) \tag {15}
|
| 305 |
+
$$
|
| 306 |
+
|
| 307 |
+
$$
|
| 308 |
+
\Phi_ {D} (\mu \tau \mathbf {Z}, \hat {\mathcal {W}} _ {d}) = \Phi_ {D} (\mathbf {Z}, \mathcal {W} _ {d}).
|
| 309 |
+
$$
|
| 310 |
+
|
| 311 |
+
Then, the function $\ell$ evaluated at $(\hat{\mathbf{C}},\hat{\mathcal{W}}_e,\hat{\mathcal{W}}_d)$ is given by
|
| 312 |
+
|
| 313 |
+
$$
|
| 314 |
+
\ell (\mathbf {X}, \Phi_ {D} \left(\Phi_ {E} (\mathbf {X}, \hat {\mathcal {W}} _ {e}) \mu \mathbf {C}, \hat {\mathcal {W}} _ {d}\right)) = \ell (\mathbf {X}, \Phi_ {D} \left(\Phi_ {E} (\mathbf {X}, \mathcal {W} _ {e}) \mathbf {C}, \mathcal {W} _ {d}\right)), \tag {16}
|
| 315 |
+
$$
|
| 316 |
+
|
| 317 |
+
which is equal to $\ell$ evaluated at $(\mathbf{C},\mathcal{W}_e,\mathcal{W}_d)$ . Moreover, the function $F$ evaluated at $(\hat{\mathbf{C}},\hat{\mathcal{W}}_e,\hat{\mathcal{W}}_d)$ is given by
|
| 318 |
+
|
| 319 |
+
$$
|
| 320 |
+
\begin{array}{l} \frac {1}{2} \| \Phi_ {E} (\mathbf {X}, \hat {\mathcal {W}} _ {e}) - \Phi_ {E} (\mathbf {X}, \hat {\mathcal {W}} _ {e}) \mu \mathbf {C} \| _ {F} ^ {2} + \lambda \theta (\mu \mathbf {C}) \\ = \tau^ {2} \frac {1}{2} \| \Phi_ {E} (\mathbf {X}, \mathcal {W} _ {e}) - \Phi_ {E} (\mathbf {X}, \mathcal {W} _ {e}) \mu \mathbf {C} \| _ {F} ^ {2} + \lambda \theta (\mu \mathbf {C}). \tag {17} \\ \end{array}
|
| 321 |
+
$$
|
| 322 |
+
|
| 323 |
+
Note that the above equation can be made arbitrarily small for choice of $\tau, \mu$ sufficiently small, completing the first statement of the result.
|
| 324 |
+
|
| 325 |
+
To see the second part of the claim, first note that the condition on the decoder – that for all $\tau_{2} \in (0,1)$ there exists $\hat{\mathcal{W}}_d$ such that $\Phi_D(\tau_2\mathbf{Z},\hat{\mathcal{W}}_d) = \Phi_D(\mathbf{Z},\mathcal{W}_d)$ – is trivially satisfied by all neural networks by simply scaling the input weights in the first layer of the decoder network by $\tau_{2}^{-1}$ . As a result, we are left to show that there will always exist a set of encoder weights which satisfies the conditions of the statement. To see this, w.l.o.g. let $\Phi_e(\mathbf{X},\mathcal{W}_e)$ take the general form
|
| 326 |
+
|
| 327 |
+
$$
|
| 328 |
+
\Phi_ {e} (\mathbf {X}, \mathcal {W} _ {e}) = \psi (\mathcal {A} (h (\mathbf {X}, \bar {\mathcal {W}} _ {e}), \mathbf {A}, \mathbf {b})) \tag {18}
|
| 329 |
+
$$
|
| 330 |
+
|
| 331 |
+
where $\mathcal{A}(\cdot, \mathbf{A}, \mathbf{b})$ is an arbitrary linear (or affine) operator parameterized by linear parameters $\mathbf{A}$ and bias terms (for affine operators) $\mathbf{b}$ ; $h(\mathbf{X}, \bar{\mathcal{W}}_e)$ is an arbitrary function parameterized by $\bar{\mathcal{W}}_e$ (note that $\mathcal{W}_e = \{\mathbf{A}, \mathbf{b}, \bar{\mathcal{W}}_e\}$ ); and $\psi$ is an arbitrary positively homogeneous function with degree $p \neq 0$ . From this note that for any $\alpha > 0$ we have the following:
|
| 332 |
+
|
| 333 |
+
$$
|
| 334 |
+
\psi \left(\mathcal {A} \left(h (\mathbf {X}, \bar {\mathcal {W}} _ {e}), \alpha \mathbf {A}, \alpha \mathbf {b}\right)\right) = \psi \left(\alpha \mathcal {A} \left(h (\mathbf {X}, \bar {\mathcal {W}} _ {e}), \mathbf {A}, \mathbf {b}\right)\right) = \alpha^ {p} \psi \left(\mathcal {A} \left(h (\mathbf {X}, \bar {\mathcal {W}} _ {e}), \mathbf {A}, \mathbf {b}\right)\right). \tag {19}
|
| 335 |
+
$$
|
| 336 |
+
|
| 337 |
+
where the first equality is due to basic properties of linear (or affine) operators and the second equality is due to positive homogeneity. As a result, for any choice of $\tau_{1} \in (0,1)$ we can choose a scaling $\alpha = \tau_{1}^{1/p}$ to achieve that for parameters $\hat{\mathcal{W}}_{e} = (\tau_{1}^{1/p}\mathbf{A},\tau_{1}^{1/p}\mathbf{b},\bar{\mathcal{W}}_{e})$ we have $\Phi_{e}(\mathbf{X},\hat{\mathcal{W}}_{e}) = \tau_{1}\Phi_{e}(\mathbf{X},\mathcal{W}_{e})$ , completing the result.
|
| 338 |
+
|
| 339 |
+
# 5.2 PROOF OF THEOREM 1
|
| 340 |
+
|
| 341 |
+
Theorem 1. Consider the following optimization problems which jointly optimize over $\mathbf{Z}$ and $\mathbf{C}$ :
|
| 342 |
+
|
| 343 |
+
$$
|
| 344 |
+
\min _ {\mathbf {C}, \mathbf {Z}} \left\{F (\mathbf {Z}, \mathbf {C}) \ \text {s.t.} \ \| \mathbf {Z} \| _ {F} ^ {2} \geq \tau \right\} \ \text {(P1)} \qquad \min _ {\mathbf {C}, \mathbf {Z}} \left\{F (\mathbf {Z}, \mathbf {C}) \ \text {s.t.} \ \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d} \ \forall i \right\} \ \text {(P2)} \tag {20}
|
| 345 |
+
$$
|
| 346 |
+
|
| 347 |
+
Then optimal values for $\mathbf{C}$ for both (P1) and (P2) are given by
|
| 348 |
+
|
| 349 |
+
$$
|
| 350 |
+
\mathbf {C} ^ {*} \in \underset {\mathbf {C}} {\arg \min } \frac {1}{2} \sigma_ {\min } ^ {2} (\mathbf {C} - \mathbf {I}) \tau + \lambda \theta (\mathbf {C}). \tag {21}
|
| 351 |
+
$$
|
| 352 |
+
|
| 353 |
+
Moreover, for any optimal $\mathbf{C}^*$ , let $r$ be the multiplicity of the smallest singular value of $\mathbf{C}^* - \mathbf{I}$ and let $\mathbf{Q} \in \mathbb{R}^{N \times r}$ be an orthonormal basis for the subspace spanned by the left singular vectors associated with the smallest singular value of $\mathbf{C}^* - \mathbf{I}$ . Then we have that optimal values for $\mathbf{Z}$ are given by:
|
| 354 |
+
|
| 355 |
+
$$
|
| 356 |
+
\mathbf {Z} ^ {*} \in \left\{\mathbf {B} \mathbf {Q} ^ {\top}: \mathbf {B} \in \mathbb {R} ^ {d \times r} \cap \mathcal {B} \right\}, \quad \mathcal {B} = \begin{cases} \left\{\mathbf {B}: \| \mathbf {B} \| _ {F} ^ {2} = \tau \right\} & \sigma_ {\min } \left(\mathbf {C} ^ {*} - \mathbf {I}\right) > 0 \quad \text {(P1)} \\ \left\{\mathbf {B}: \| \mathbf {B} \| _ {F} ^ {2} \geq \tau \right\} & \sigma_ {\min } \left(\mathbf {C} ^ {*} - \mathbf {I}\right) = 0 \\ \left\{\mathbf {B}: \| \mathbf {B} ^ {i} \| _ {F} ^ {2} = \frac {\tau}{d}, \ \forall i \right\} & \sigma_ {\min } \left(\mathbf {C} ^ {*} - \mathbf {I}\right) > 0 \quad \text {(P2)} \\ \left\{\mathbf {B}: \| \mathbf {B} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d}, \ \forall i \right\} & \sigma_ {\min } \left(\mathbf {C} ^ {*} - \mathbf {I}\right) = 0 \end{cases} \tag {22}
|
| 357 |
+
$$
|
| 358 |
+
|
| 359 |
+
Proof. The objective $F(\mathbf{Z}, \mathbf{C})$ can be reformulated as
|
| 360 |
+
|
| 361 |
+
$$
|
| 362 |
+
F (\mathbf {Z}, \mathbf {C}) = \frac {1}{2} \operatorname {t r} (\mathbf {Z} (\mathbf {C} - \mathbf {I}) (\mathbf {C} - \mathbf {I}) ^ {\top} \mathbf {Z} ^ {\top}) + \lambda \theta (\mathbf {C}) = \frac {1}{2} \sum_ {i = 1} ^ {d} \mathbf {Z} ^ {i} (\mathbf {C} - \mathbf {I}) (\mathbf {C} - \mathbf {I}) ^ {\top} (\mathbf {Z} ^ {i}) ^ {\top} + \lambda \theta (\mathbf {C}), \tag {23}
|
| 363 |
+
$$
|
| 364 |
+
|
| 365 |
+
where recall $\mathbf{Z}^i$ denotes the $i^{th}$ row of $\mathbf{Z}$ . If we add the constraints on $\mathbf{Z}$ we have the following minimization problems over $\mathbf{Z}$ with $\mathbf{C}$ held fixed:
|
| 366 |
+
|
| 367 |
+
$$
|
| 368 |
+
\min _ {\mathbf {Z}} \frac {1}{2} \sum_ {i = 1} ^ {d} \mathbf {Z} ^ {i} (\mathbf {C} - \mathbf {I}) (\mathbf {C} - \mathbf {I}) ^ {\top} (\mathbf {Z} ^ {i}) ^ {\top} \text {s . t .} \mathbf {Z} \in \mathcal {Z} = \left\{ \begin{array}{l l} \sum_ {i = 1} ^ {d} \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \tau & \text {(P 1)} \\ \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d} \forall i & \text {(P 2)} \end{array} . \right. \tag {24}
|
| 369 |
+
$$
|
| 370 |
+
|
| 371 |
+
Note that if we fix the magnitude of the rows, $\| \mathbf{Z}^i \|_F^2 = \mathbf{k}_i$ for any $\mathbf{k} \in \mathbb{R}^d$ , $\mathbf{k} \geq 0$ , $\sum_{i=1}^d \mathbf{k}_i \geq \tau$ for (P1) and $\mathbf{k} \in \mathbb{R}^d$ , $\mathbf{k}_i \geq \tau / d$ for (P2) and optimize over the directions of the rows, then the minimum is obtained whenever $\mathbf{Z}^i$ is in the span of the eigenvectors of $(\mathbf{C} - \mathbf{I})(\mathbf{C} - \mathbf{I})^\top$ with smallest eigenvalue, which implies that all the rows of an optimal $\mathbf{Z}$ matrix must lie in the span of $\mathbf{Q}$ , where $\mathbf{Q} \in \mathbb{R}^{N \times r}$ is an orthonormal basis for the subspace spanned by the left singular vectors of $\mathbf{C} - \mathbf{I}$ associated with the smallest singular value of $\mathbf{C} - \mathbf{I}$ , which has multiplicity $r$ .
|
| 372 |
+
|
| 373 |
+
As a result, we have that optimal values of $\mathbf{Z}$ must take the form $\mathbf{Z} = \mathbf{BQ}^{\top}$ for some $\mathbf{B} \in \mathbb{R}^{d \times r}$ . Further, we note that the following also holds:
|
| 374 |
+
|
| 375 |
+
$$
|
| 376 |
+
\begin{aligned} F (\mathbf {B Q} ^ {\top}, \mathbf {C}) &= \frac {1}{2} \operatorname {tr} (\mathbf {B Q} ^ {\top} (\mathbf {C} - \mathbf {I}) (\mathbf {C} - \mathbf {I}) ^ {\top} \mathbf {Q B} ^ {\top}) + \lambda \theta (\mathbf {C}) \\ &= \frac {1}{2} \sigma_ {\min } ^ {2} (\mathbf {C} - \mathbf {I}) \operatorname {tr} (\mathbf {B} \mathbf {B} ^ {\top}) + \lambda \theta (\mathbf {C}) = \frac {1}{2} \sigma_ {\min } ^ {2} (\mathbf {C} - \mathbf {I}) \| \mathbf {B} \| _ {F} ^ {2} + \lambda \theta (\mathbf {C}). \end{aligned} \tag {25}
|
| 377 |
+
$$
|
| 378 |
+
|
| 379 |
+
The constraints for $\mathbf{B}$ are then seen by noting $\mathbf{Z}\mathbf{Z}^{\top} = \mathbf{B}\mathbf{Q}^{\top}\mathbf{Q}\mathbf{B}^{\top} = \mathbf{B}\mathbf{B}^{\top}$ , so $\| \mathbf{Z}\| _F^2 = \mathrm{tr}(\mathbf{Z}\mathbf{Z}^\top) = \mathrm{tr}(\mathbf{BB}^\top) = \| \mathbf{B}\| _F^2$ and $\| \mathbf{Z}^i\| _F^2 = (\mathbf{ZZ}^\top)_{i,i} = (\mathbf{BB}^\top)_{i,i} = \mathbf{B}^i (\mathbf{B}^i)^\top = \| \mathbf{B}^i\| _F^2$ , and if $\sigma_{\min}(\mathbf{C}^{*} - \mathbf{I}) > 0$ then minimizing (25) w.r.t. $\mathbf{B}$ subject to the constraints on $\mathbf{Z}$ gives that $\| \mathbf{B}\| _F^2 = \tau$ is optimal.
|
| 380 |
+
|
| 381 |
+
# 5.3 PROOF OF THEOREM 2
|
| 382 |
+
|
| 383 |
+
Theorem 2. Optimal solutions to the problems
|
| 384 |
+
|
| 385 |
+
$$
|
| 386 |
+
\min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \text {s . t .} \operatorname {d i a g} (\mathbf {C}) = \mathbf {0}, \mathbf {Z} = \mathbf {Z} \mathbf {C}, \| \mathbf {Z} \| _ {F} ^ {2} \geq \tau \tag {26}
|
| 387 |
+
$$
|
| 388 |
+
|
| 389 |
+
$$
|
| 390 |
+
\min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \text {s . t .} \operatorname {d i a g} (\mathbf {C}) = \mathbf {0}, \mathbf {Z} = \mathbf {Z} \mathbf {C}, \| \mathbf {Z} ^ {i} \| _ {F} ^ {2} \geq \frac {\tau}{d} \forall i \tag {27}
|
| 391 |
+
$$
|
| 392 |
+
|
| 393 |
+
are characterized by the set
|
| 394 |
+
|
| 395 |
+
$$
|
| 396 |
+
\left(\mathbf {Z} ^ {*}, \mathbf {C} ^ {*}\right) \in \left\{\left[ \begin{array}{l l l} \mathbf {z} & \mathbf {z} & \mathbf {0} _ {d \times N - 2} \end{array} \right] \mathbf {P} \right\} \times \left\{\mathbf {P} ^ {\top} \left[ \begin{array}{c c c c c} 0 & 1 & 0 & \dots & 0 \\ 1 & 0 & 0 & \dots & 0 \\ 0 & 0 & 0 & \dots & 0 \\ \vdots & \vdots & \vdots & \ddots & \\ 0 & 0 & 0 & & 0 \end{array} \right] \mathbf {P} \right\}, \tag {28}
|
| 397 |
+
$$
|
| 398 |
+
|
| 399 |
+
where $\mathbf{P} \in \mathbb{R}^{N \times N}$ is an arbitrary signed-permutation matrix and $\mathbf{z} \in \mathbb{R}^d$ is an arbitrary vector which satisfies $\| \mathbf{z} \|_F^2 \geq \tau / 2$ for (P1) and $\mathbf{z}_i^2 \geq \tau / (2d), \forall i \in [1, d]$ for (P2).
|
| 400 |
+
|
| 401 |
+
Proof. As we observed from the proof of Theorem 1, if $\mathbf{C} - \mathbf{I}$ has a left null-space we can choose an optimal $\mathbf{Z}$ to have its rows in that null-space (and this also clearly implies we have $\sigma_{\min}(\mathbf{C} - \mathbf{I}) = 0$ ). Also note that when $\sigma_{\min}(\mathbf{C} - \mathbf{I}) = 0$ this corresponds to the case where $\mathbf{Z} = \mathbf{Z}\mathbf{C} \iff \mathbf{Z}(\mathbf{I} - \mathbf{C}) = \mathbf{0}$ . Further, note that if $\mathbf{q}$ is a non-zero vector in the left null-space of $\mathbf{C} - \mathbf{I}$ we have that $\mathbf{q}^\top (\mathbf{C} - \mathbf{I}) = \mathbf{0} \iff \mathbf{q}^\top \mathbf{C} = \mathbf{q}^\top$ , which implies that if we take $\mathbf{q}$ to be all-zero except for its $i^{\text{th}}$ entry, then this would imply that $\mathbf{C}_{i,i}$ must be non-zero, which would violate the $\operatorname{diag}(\mathbf{C}) = \mathbf{0}$ constraint, so any vector $\mathbf{q}$ in a left null-space of $\mathbf{C} - \mathbf{I}$ for a feasible $\mathbf{C}$ matrix must have at least two non-zero entries. As a result, solving (5) with the constraint $\sigma_{\min}(\mathbf{C} - \mathbf{I}) = 0$ is equivalent to the following problem:
|
| 402 |
+
|
| 403 |
+
$$
|
| 404 |
+
\min _ {\mathbf {C}, \mathbf {q}} \| \mathbf {C} \| _ {1} \text {s . t .} \mathbf {q} ^ {\top} (\mathbf {C} - \mathbf {I}) = \mathbf {0}, \operatorname {d i a g} (\mathbf {C}) = \mathbf {0}, \| \mathbf {q} \| _ {0} \geq 2. \tag {29}
|
| 405 |
+
$$
|
| 406 |
+
|
| 407 |
+
where $\| \cdot \|_0$ denotes the $\ell_0$ pseudo-norm of a vector defined as the number of non-zero entries in a vector. To first minimize w.r.t. $\mathbf{C}$ with a fixed $\mathbf{q}$ , we form the Lagrangian:
|
| 408 |
+
|
| 409 |
+
$$
|
| 410 |
+
\min _ {\mathbf {C}} \left\{L (\mathbf {C}, \boldsymbol {\Lambda}, \boldsymbol {\Gamma}) = \| \mathbf {C} \| _ {1} + \langle \boldsymbol {\Lambda}, (\mathbf {I} - \mathbf {C} ^ {\top}) \mathbf {q} \rangle + \langle \operatorname {D i a g} (\boldsymbol {\Gamma}), \mathbf {C} \rangle \right\} = \tag {30}
|
| 411 |
+
$$
|
| 412 |
+
|
| 413 |
+
$$
|
| 414 |
+
\min _ {\mathbf {C}} \| \mathbf {C} \| _ {1} + \langle \mathbf {q} \boldsymbol {\Lambda} ^ {\top} - \operatorname {D i a g} (\boldsymbol {\Gamma}), \mathbf {C} \rangle + \langle \boldsymbol {\Lambda}, \mathbf {q} \rangle = \langle \boldsymbol {\Lambda}, \mathbf {q} \rangle - \delta \left(\| \mathbf {q} \boldsymbol {\Lambda} ^ {\top} - \operatorname {D i a g} (\boldsymbol {\Gamma}) \| _ {\infty} \leq 1\right), \tag {31}
|
| 415 |
+
$$
|
| 416 |
+
|
| 417 |
+
where $\mathbf{\Lambda} \in \mathbb{R}^N$ and $\mathbf{\Gamma} \in \mathbb{R}^N$ are vectors of dual variables to enforce the $(\mathbf{I} - \mathbf{C}^\top)\mathbf{q} = \mathbf{0}$ and the $\mathrm{diag}(\mathbf{C}) = \mathbf{0}$ constraints, respectively. This gives the dual problem
|
| 418 |
+
|
| 419 |
+
$$
|
| 420 |
+
\max _ {\boldsymbol {\Lambda}, \boldsymbol {\Gamma}} \left\{\langle \boldsymbol {\Lambda}, \mathbf {q} \rangle \text {s . t .} \| \mathbf {q} \boldsymbol {\Lambda} ^ {\top} - \operatorname {D i a g} (\boldsymbol {\Gamma}) \| _ {\infty} \leq 1 \right\} = \max _ {\boldsymbol {\Lambda}} \left\{\langle \boldsymbol {\Lambda}, \mathbf {q} \rangle \text {s . t .} | \mathbf {q} _ {i} \boldsymbol {\Lambda} _ {j} | \leq 1 \forall i \neq j \right\}. \tag {32}
|
| 421 |
+
$$
|
| 422 |
+
|
| 423 |
+
We note that (32) is separable in the entries of $\Lambda$ , so if we define $\{i_k\}_{k=1}^N$ to be the indexing which sorts the absolute values of the entries of $\mathbf{q}$ in descending order, $|\mathbf{q}_{i_1}| \geq |\mathbf{q}_{i_2}| \geq \dots \geq |\mathbf{q}_{i_N}|$ , one can easily see that an optimal choice of $\Lambda$ is given by
|
| 424 |
+
|
| 425 |
+
$$
|
| 426 |
+
\boldsymbol {\Lambda} _ {i _ {1}} ^ {*} = \frac {\operatorname {s g n} \left(\mathbf {q} _ {i _ {1}}\right)}{\left| \mathbf {q} _ {i _ {2}} \right|}, \quad \boldsymbol {\Lambda} _ {i _ {k}} ^ {*} = \frac {\operatorname {s g n} \left(\mathbf {q} _ {i _ {k}}\right)}{\left| \mathbf {q} _ {i _ {1}} \right|} \forall k \in [ 2, N ] \Longrightarrow \langle \boldsymbol {\Lambda} ^ {*}, \mathbf {q} \rangle = \frac {\left| \mathbf {q} _ {i _ {1}} \right|}{\left| \mathbf {q} _ {i _ {2}} \right|} + \frac {1}{\left| \mathbf {q} _ {i _ {1}} \right|} \sum_ {k = 2} ^ {N} \left| \mathbf {q} _ {i _ {k}} \right|. \tag {33}
|
| 427 |
+
$$
|
| 428 |
+
|
| 429 |
+
If we now minimize the above w.r.t. $\mathbf{q}$ , note that the optimal value of the dual objective given by the above equation is invariant w.r.t. scaling the $\mathbf{q}$ vector by any non-zero scalar, so we can w.l.o.g. assume that $|\mathbf{q}_{i_1}| = 1$ and note that this implies that problem (29) is equivalent to the following optimization problem over the magnitudes of $\mathbf{q}$ if we define $p_k = |\mathbf{q}_{i_k}|$ :
|
| 430 |
+
|
| 431 |
+
$$
|
| 432 |
+
\min _ {\left\{p _ {k} \right\} _ {k = 2} ^ {N}} \frac {1}{p _ {2}} + p _ {2} + \sum_ {k = 3} ^ {N} p _ {k} \text {s . t .} 1 \geq p _ {2} \geq p _ {3} \geq \dots \geq p _ {N} \geq 0. \tag {34}
|
| 433 |
+
$$
|
| 434 |
+
|
| 435 |
+
Now, note that for a non-negative scalar $\alpha \geq 0$ the minimum of $\alpha^{-1} + \alpha$ is achieved at $\alpha = 1$ , so one can clearly see that the optimal value for the above problem is achieved at $p_2 = 1$ and $p_k = 0$ , $\forall k \in [3, N]$ . From this we have that an optimal $\mathbf{q}$ for (29) must have exactly two nonzero entries and the non-zero entries must be equal in absolute value. Further, this also implies that $\| \mathbf{C}^* \|_1 = 2$ , and because we must have $\mathbf{q}^\top \mathbf{C}^* = \mathbf{q}^\top$ , if we scale $\mathbf{q}$ to have $\pm 1$ for its two non-zero entries, we then have $\| \mathbf{q}^\top \mathbf{C}^* \|_1 = \| \mathbf{q} \|_1 = 2 = \| \mathbf{C}^* \|_1$ , so if we let $(i, j)$ index the two non-zero entries of $\mathbf{q}$ we have:
|
| 436 |
+
|
| 437 |
+
$$
|
| 438 |
+
2 = \left\| \mathbf {q} ^ {\top} \mathbf {C} ^ {*} \right\| _ {1} = \left\| \operatorname {s g n} \left(\mathbf {q} _ {i}\right) \left(\mathbf {C} ^ {*}\right) ^ {i} + \operatorname {s g n} \left(\mathbf {q} _ {j}\right) \left(\mathbf {C} ^ {*}\right) ^ {j} \right\| _ {1} \leq \left\| \left(\mathbf {C} ^ {*}\right) ^ {i} \right\| _ {1} + \left\| \left(\mathbf {C} ^ {*}\right) ^ {j} \right\| _ {1} \leq \left\| \mathbf {C} ^ {*} \right\| _ {1} = 2. \tag {35}
|
| 439 |
+
$$
|
| 440 |
+
|
| 441 |
+
This implies that all the non-zero entries of $\mathbf{C}^*$ must lie in rows $i$ and $j$ , and if there is any overlap in the non-zero support of these rows the signs must match after multiplication by $\mathrm{sgn}(\mathbf{q}_i)$ and $\mathrm{sgn}(\mathbf{q}_j)$ . However, since $\mathbf{q}^\top \mathbf{C}$ must equal $\mathbf{q}^\top$ (which is zero everywhere except for entries $i$ and $j$ ) and the diagonal of $\mathbf{C}^*$ must be zero, the only way this can be achieved is for the two rows to have non-overlapping non-zero support, proving that the only non-zero entries of $\mathbf{C}$ must be $\mathbf{C}_{i,j}$ and $\mathbf{C}_{j,i}$ which take values in $\{-1,1\}$ , depending on the choice of the signs for $\mathbf{q}_i$ and $\mathbf{q}_j$ . The result is completed by noting that since we require $\mathbf{Z}^* = \mathbf{Z}^*\mathbf{C}^*$ , then $\mathbf{Z}_i^*$ and $\mathbf{Z}_j^*$ must be equal to within a sign-flip depending on the choice of the signs of the $\mathbf{q}$ vector.
|
| 442 |
+
|
| 443 |
+
# 5.4 PROOF OF THEOREM 3
|
| 444 |
+
|
| 445 |
+
Theorem 3. Optimal solutions to the problems
|
| 446 |
+
|
| 447 |
+
(P1) $\min_{\mathbf{Z},\mathbf{C}}\| \mathbf{C}\|_{\mathcal{S}_p}$ s.t. $\mathbf{Z} = \mathbf{Z}\mathbf{C}$ , $\| \mathbf{Z}\| _F^2\geq \tau$ (36)
|
| 448 |
+
(P2) $\min_{\mathbf{Z},\mathbf{C}}\| \mathbf{C}\|_{\mathcal{S}_p}$ s.t. $\mathbf{Z} = \mathbf{Z}\mathbf{C}$ , $\| \mathbf{Z}^i\| _F^2\geq \frac{\tau}{d}\forall i$ (37)
|
| 449 |
+
|
| 450 |
+
where $\| \mathbf{C}\|_{S_p}$ is any Schatten- $p$ norm on $\mathbf{C}$ , are characterized by the set
|
| 451 |
+
|
| 452 |
+
$$
|
| 453 |
+
\left(\mathbf {Z} ^ {*}, \mathbf {C} ^ {*}\right) \in \left\{\left(\mathbf {z q} ^ {\top}\right) \times \left(\mathbf {q q} ^ {\top}\right): \mathbf {q} \in \mathbb {R} ^ {N}, \| \mathbf {q} \| _ {F} = 1 \right\} \tag {38}
|
| 454 |
+
$$
|
| 455 |
+
|
| 456 |
+
where $\mathbf{z} \in \mathbb{R}^d$ is an arbitrary vector which satisfies $\| \mathbf{z} \|_F^2 \geq \tau$ for (P1) and $\mathbf{z}_i^2 \geq \frac{\tau}{d}$ , $\forall i$ for (P2).
|
| 457 |
+
|
| 458 |
+
Proof. To begin, by the same arguments as in Theorem 2 we consider an optimization problem similar to (29) but for $\theta (\mathbf{C}) = \| \mathbf{C}\|_{S_p}$ being any Schatten- $p$ norm and with the $\| \mathbf{q}\| _0$ constraint replaced by a $\mathbf{q}\neq \mathbf{0}$ constraint:
|
| 459 |
+
|
| 460 |
+
$$
|
| 461 |
+
\min _ {\mathbf {C}, \mathbf {q}} \| \mathbf {C} \| _ {\mathcal {S} _ {p}} \text {s . t .} \mathbf {q} ^ {\top} (\mathbf {C} - \mathbf {I}) = \mathbf {0}, \quad \mathbf {q} \neq \mathbf {0}. \tag {39}
|
| 462 |
+
$$
|
| 463 |
+
|
| 464 |
+
Again forming the Lagrangian for $\mathbf{C}$ with $\mathbf{q}$ fixed we have:
|
| 465 |
+
|
| 466 |
+
$\min_{\mathbf{C}}\left\{L(\mathbf{C},\Lambda) = \| \mathbf{C}\|_{S_p} + \langle \Lambda ,(\mathbf{I} - \mathbf{C}^\top)\mathbf{q}\rangle \right\} =$ (40)
|
| 467 |
+
|
| 468 |
+
$\min_{\mathbf{C}}\| \mathbf{C}\|_{\mathcal{S}_p} - \langle \mathbf{q}\Lambda^\top ,\mathbf{C}\rangle +\langle \Lambda ,\mathbf{q}\rangle = \langle \Lambda ,\mathbf{q}\rangle -\delta (\| \mathbf{q}\Lambda^\top \|_{\mathcal{S}_p}^\circ \leq 1)$ (41)
|
| 469 |
+
|
| 470 |
+
which implies the dual problem is:
|
| 471 |
+
|
| 472 |
+
$$
|
| 473 |
+
\max _ {\Lambda} \left\langle \Lambda , \mathbf {q} \right\rangle \text {s . t .} \left\| \mathbf {q} \Lambda^ {\top} \right\| _ {\mathcal {S} _ {p}} ^ {\circ} \leq 1 \tag {42}
|
| 474 |
+
$$
|
| 475 |
+
|
| 476 |
+
where $\| \cdot \|_{S_p}^{\circ}$ denotes the dual norm. Note that for any Schatten- $p$ norm, the dual norm is again a Schatten- $p$ norm, but since we only evaluate the norm on rank-1 matrices this is equal to the Frobenius norm for all values of $p$ . As a result we have for all choices of Schatten- $p$ norm that the dual problem is equivalent to:
|
| 477 |
+
|
| 478 |
+
$$
|
| 479 |
+
\max _ {\Lambda} \left\{\langle \Lambda , \mathbf {q} \rangle \text {s . t .} \| \mathbf {q} \Lambda^ {\top} \| _ {F} \leq 1 \right\} = \max _ {\Lambda} \left\{\langle \Lambda , \mathbf {q} \rangle \text {s . t .} \| \mathbf {q} \| _ {F} \| \Lambda \| _ {F} \leq 1 \right\} \tag {43}
|
| 480 |
+
$$
|
| 481 |
+
|
| 482 |
+
From the above, one can easily see that the optimal choice for $\Lambda$ is given as $\Lambda^{*} = \frac{\mathbf{q}}{\|\mathbf{q}\|_{F}^{2}}$ and the optimal objective value is 1. Further note that from primal optimality in (41) we must have that $\mathbf{q}(\Lambda^{*})^{\top} \in \partial \| \mathbf{C}^{*}\|_{\mathcal{S}_{p}}$ which implies that $\langle \mathbf{C}^{*}, \mathbf{q}(\Lambda^{*})^{\top} \rangle = \| \mathbf{C}^{*}\|_{\mathcal{S}_{p}}\| \mathbf{q}(\Lambda^{*})^{\top}\|_{\mathcal{S}_{p}}^{\circ} = \| \mathbf{C}^{*}\|_{\mathcal{S}_{p}} = 1$ . As a result, we have that $\mathbf{C}^{*} = \mathbf{q}(\Lambda^{*})^{\top}$ by the Cauchy-Schwarz inequality and the fact that $\| \mathbf{q}(\Lambda^{*})^{\top}\|_{F} = 1$ . Thus since $\mathbf{C}^{*}$ is a rank-1 matrix then $\mathbf{C}^{*} - \mathbf{I}$ can only have one singular value equal to 0, so all the rows of $\mathbf{Z}^{*}$ must be equal to a scaling of $\mathbf{q}$ . Given this structure for $\mathbf{Z}^{*}$ the result then follows, where we also recall that $\| \mathbf{C}^{*}\|_{\mathcal{S}_{p}} = 1$ , which implies the constraints on $\mathbf{q}$ in the statement.
|
| 483 |
+
|
| 484 |
+
# 5.5 PROOF OF THEOREM 4
|
| 485 |
+
|
| 486 |
+
We now present the proof for Theorem 4. Before proving the main result we first prove a simple Lemma which will be helpful.
|
| 487 |
+
|
| 488 |
+
Lemma 1. For a given matrix $\mathbf{Z} \in \mathbb{R}^{d \times N}$ and vector $\mathbf{z} \in \mathbb{R}^d$ such that $\| \mathbf{Z}_i \|_F = \tau \forall i$ and $\| \mathbf{z} \|_F = \tau$ , let $k \in [0, N]$ be the number of columns in $\mathbf{Z}$ which are equal to $\mathbf{z}$ to within a sign-flip (i.e., $\mathbf{Z}_i = \pm \mathbf{z}$ ). Then, if $k \geq 1$ the following holds:
|
| 489 |
+
|
| 490 |
+
$$
|
| 491 |
+
\min _ {\mathbf {c}} \left\{\| \mathbf {c} \| _ {1} \text {s . t .} \mathbf {z} = \mathbf {Z c} \right\} = 1 \tag {44}
|
| 492 |
+
$$
|
| 493 |
+
|
| 494 |
+
and $\mathbf{c}_i^* \neq 0 \Rightarrow \mathbf{Z}_i = \pm \mathbf{z}$ .
|
| 495 |
+
|
| 496 |
+
Further, if $k = 0$ we also have
|
| 497 |
+
|
| 498 |
+
$$
|
| 499 |
+
\min _ {\mathbf {c}} \left\{\| \mathbf {c} \| _ {1} \text {s . t .} \mathbf {z} = \mathbf {Z c} \right\} > 1 \tag {45}
|
| 500 |
+
$$
|
| 501 |
+
|
| 502 |
+
(where we use the convention that the objective takes value $\infty$ if $\mathbf{z} = \mathbf{Z}\mathbf{c}$ has no feasible solution.)
|
| 503 |
+
|
| 504 |
+
Proof. Without loss of generality, assume the columns of $\mathbf{Z}$ are permuted so that $\mathbf{Z}$ has the form:
|
| 505 |
+
|
| 506 |
+
$$
|
| 507 |
+
\mathbf {Z} = \left[ \mathbf {z s} ^ {\top}, \bar {\mathbf {Z}} \right] \tag {46}
|
| 508 |
+
$$
|
| 509 |
+
|
| 510 |
+
where $\mathbf{s} \in \{-1, 1\}^k$ is a vector with $k \in [0, N]$ elements each taking value $-1$ or $1$ , and $\bar{\mathbf{Z}} \in \mathbb{R}^{d \times (N - k)}$ contains all the columns of $\mathbf{Z}$ which are not equal to $\pm \mathbf{z}$ .
|
| 511 |
+
|
| 512 |
+
First we consider the $k \geq 1$ case and note that the Lagrangian of (44) is given as:
|
| 513 |
+
|
| 514 |
+
$$
|
| 515 |
+
L (\mathbf {c}, \Lambda) = \| \mathbf {c} \| _ {1} + \langle \Lambda , \mathbf {z} - \mathbf {Z c} \rangle \tag {47}
|
| 516 |
+
$$
|
| 517 |
+
|
| 518 |
+
Now minimizing $L$ w.r.t. $\mathbf{c}$ gives
|
| 519 |
+
|
| 520 |
+
$$
|
| 521 |
+
\min _ {\mathbf {c}} \| \mathbf {c} \| _ {1} - \langle \mathbf {Z} ^ {\top} \Lambda , \mathbf {c} \rangle = - \delta \left(\| \mathbf {Z} ^ {\top} \Lambda \| _ {\infty} \leq 1\right) \tag {48}
|
| 522 |
+
$$
|
| 523 |
+
|
| 524 |
+
which gives that the dual problem to (44) (with $k \geq 1$ ) is given by
|
| 525 |
+
|
| 526 |
+
$$
|
| 527 |
+
\max _ {\Lambda} \langle \Lambda , \mathbf {z} \rangle \text {s . t .} \| \Lambda^ {\top} \mathbf {Z} \| _ {\infty} \leq 1 \Longleftrightarrow \max _ {\Lambda} \langle \Lambda , \mathbf {z} \rangle \text {s . t .} | \langle \Lambda , \mathbf {Z} _ {i} \rangle | \leq 1, \forall i \in [ 1, N ] \Longleftrightarrow \tag {49}
|
| 528 |
+
$$
|
| 529 |
+
|
| 530 |
+
$$
|
| 531 |
+
\max _ {\Lambda} \langle \Lambda , \mathbf {z} \rangle \text {s . t .} | \langle \Lambda , \mathbf {z} \rangle | \leq 1, | \langle \Lambda , \bar {\mathbf {Z}} _ {i} \rangle | \leq 1, \forall i \in [ 1, N - k ]
|
| 532 |
+
$$
|
| 533 |
+
|
| 534 |
+
where the final equivalence is due to the special structure of $\mathbf{Z}$ in (46). Clearly from (49) and the fact that $\| \mathbf{z}\| _F = \| \bar{\mathbf{Z}}_i\| _F = \tau$ , $\forall i$ it is easily seen that an optimal $\Lambda$ is any vector such that $\langle \Lambda^{*},\mathbf{z}\rangle = 1$ , so as a result we have that the optimal solution to the problem in (44) has objective value 1. Further, note that when $k\geq 1$ , then due to the triangle inequality and the fact that all the vectors in $\mathbf{Z}$ have equal norm we can only achieve $\mathbf{z} = \mathbf{Z}\mathbf{c}^*$ with $\| \mathbf{c}^*\| _1 = 1$ if all the non-zero entries of $\mathbf{c}$ are in the first $k$ entries and the sign of any non-zero element of $\mathbf{c}^*$ must satisfy $sgn(\mathbf{c}_i^*) = \mathbf{s}_i$ , $i\in [1,k]$ .
|
| 535 |
+
|
| 536 |
+
To see that (45) is true, first note that an optimal solution to (49) with $k \geq 1$ is to choose $\Lambda^{*} = \mathbf{z}\tau^{-2}$ and that because $\bar{\mathbf{Z}}_i \neq \pm \mathbf{z}, \forall i$ we have $|\langle \bar{\mathbf{Z}}_i, \Lambda^* \rangle| = |\langle \bar{\mathbf{Z}}_i, \mathbf{z}\tau^{-2} \rangle| < \| \bar{\mathbf{Z}}_i\|_F\| \mathbf{z}\tau^{-2}\|_F = 1$ . Further, note that the problem in (45) (with $k = 0$ ) will have an equivalent dual problem to (49), with the $|\langle \Lambda, \mathbf{z} \rangle| \leq 1$ constraint removed, which shows the inequality, as we can always take $\Lambda = \alpha \mathbf{z}\tau^{-2}$ for some $\alpha > 1$ and remain dual feasible, giving a dual value (and hence optimal objective value) for (45) strictly greater than 1.
|
| 537 |
+
|
| 538 |
+
With this result we are now ready to prove Theorem 4.
|
| 539 |
+
|
| 540 |
+
Theorem 4. Optimal solutions to the problem
|
| 541 |
+
|
| 542 |
+
$$
|
| 543 |
+
\min _ {\mathbf {Z}, \mathbf {C}} \| \mathbf {C} \| _ {1} \text {s . t .} \operatorname {d i a g} (\mathbf {C}) = \mathbf {0}, \quad \mathbf {Z} = \mathbf {Z} \mathbf {C}, \quad \| \mathbf {Z} _ {i} \| _ {F} ^ {2} = \tau \forall i \tag {50}
|
| 544 |
+
$$
|
| 545 |
+
|
| 546 |
+
must have the property that for any column in $\mathbf{Z}^*$ , $\mathbf{Z}_i^*$ , there exists another column, $\mathbf{Z}_j^* (i \neq j)$ , such that $\mathbf{Z}_i^* = s_{i,j}\mathbf{Z}_j^*$ where $s_{i,j} \in \{-1,1\}$ . Further, $\|\mathbf{C}_i^*\|_1 = 1$ $\forall i$ and $\mathbf{C}_{i,j} \neq 0 \Rightarrow \mathbf{Z}_i^* = \pm \mathbf{Z}_j^*$ .
|
| 547 |
+
|
| 548 |
+
Proof. First, note that any $\mathbf{Z}^*$ which satisfies the conditions of the Theorem achieves optimal objective value with $\| \mathbf{C}_i^*\| _1 = 1$ , $\forall i$ and $\mathbf{C}_{i,j}\neq 0\Rightarrow \mathbf{Z}_i^* = \pm \mathbf{Z}_j^*$ directly from Lemma 1 since when we are finding an optimal $\mathbf{C}_i$ encoding for column $\mathbf{Z}_i^*$ there must exist another column in $\mathbf{Z}^*$ which is equal to $\mathbf{Z}_i^*$ to within a sign-flip ( $k\geq 1$ in Lemma 1).
|
| 549 |
+
|
| 550 |
+
To show that this is optimal, we will proceed by contradiction and assume we have a feasible pair of matrices $(\tilde{\mathbf{Z}},\tilde{\mathbf{C}})$ which does not satisfy the conditions of the Theorem but $\| \tilde{\mathbf{C}}\| _1\leq N = \| \mathbf{C}^*\| _1$ . Note that because $\tilde{\mathbf{Z}}$ does not satisfy the conditions of the Theorem this implies that at least one column of $\tilde{\mathbf{Z}}$ must be distinct (i.e., $\exists i:\tilde{\mathbf{Z}}_i\neq \pm \tilde{\mathbf{Z}}_j,\forall j\neq i$ ). As a result, for any column $\tilde{\mathbf{Z}}_i$ which is distinct we must have $\| \tilde{\mathbf{C}}_i\| _1 > 1$ from Lemma 1 ( $k = 0$ case). If we let $\mathcal{I}$ denote the set of indices of the distinct columns in $\tilde{\mathbf{Z}}$ and $\mathcal{I}^{\circ}$ the complement of $\mathcal{I}$ we then have
|
| 551 |
+
|
| 552 |
+
$$
|
| 553 |
+
\begin{aligned} \left\| \tilde {\mathbf {C}} \right\| _ {1} &= \sum_ {i \in \mathcal {I}} \left\| \tilde {\mathbf {C}} _ {i} \right\| _ {1} + \sum_ {j \in \mathcal {I} ^ {\circ}} \left\| \tilde {\mathbf {C}} _ {j} \right\| _ {1} \quad & (51) \\ &= \sum_ {i \in \mathcal {I}} \left\| \tilde {\mathbf {C}} _ {i} \right\| _ {1} + \left| \mathcal {I} ^ {\circ} \right| \quad & (52) \\ &> \left| \mathcal {I} \right| + \left| \mathcal {I} ^ {\circ} \right| = N \quad & (53) \end{aligned}
|
| 554 |
+
$$
|
| 555 |
+
|
| 556 |
+
where the equality in (52) comes from noting that any $\tilde{\mathbf{Z}}_j$ , $j \in \mathcal{I}^\circ$ corresponds to the $k \geq 1$ situation in Lemma 1 and the inequality in (53) comes from noting that any $\tilde{\mathbf{Z}}_i$ , $i \in \mathcal{I}$ corresponds to the $k = 0$ situation in Lemma 1 and the fact that $|\mathcal{I}| \geq 1$ . We thus have the contradiction and the result is completed.
|
| 557 |
+
|
| 558 |
+
# 5.6 PROOF OF PROPOSITION 2
|
| 559 |
+
|
| 560 |
+
Proposition 2. Consider encoder and decoder networks with the form given in (14). Then, given any dataset $\mathbf{X} \in \mathbb{R}^{d_x \times N}$ where the linear operators in both the encoder/decoder can express identity on $\mathbf{X}$ and any $\tau > 0$ there exist network parameters $(\mathcal{W}_e, \mathcal{W}_d)$ which satisfy the following:
|
| 561 |
+
|
| 562 |
+
1. Embedded points are arbitrarily close: $\| \Phi_E(\mathbf{X}_i,\mathcal{W}_e) - \Phi_E(\mathbf{X}_j,\mathcal{W}_e)\| \leq \epsilon \forall (i,j)$ and $\forall \epsilon >0$
|
| 563 |
+
2. Embedded points have norm arbitrarily close to $\tau$ : $\left| \|\Phi_E(\mathbf{X}_i, \mathcal{W}_e)\|_F - \tau \right| \leq \epsilon \ \forall i$ and $\forall \epsilon > 0$ .
|
| 564 |
+
3. Embedded points can be decoded exactly: $\Phi_D(\Phi_E(\mathbf{X}_i, \mathcal{W}_e), \mathcal{W}_d) = \mathbf{X}_i$ , $\forall i$ .
|
| 565 |
+
|
| 566 |
+
Proof. To begin, let $(\tilde{\mathbf{W}}_e^1,\tilde{\mathbf{W}}_e^2)$ and $(\tilde{\mathbf{W}}_d^1,\tilde{\mathbf{W}}_d^2)$ be choices of linear operator parameters such that $\tilde{\mathbf{W}}_e^2\tilde{\mathbf{W}}_e^1\mathbf{X} = \tilde{\mathbf{W}}_d^2\tilde{\mathbf{W}}_d^1\mathbf{X} = \mathbf{X}$ which always must exist since the operators can express identity on $\mathbf{X}$ . Now, for an arbitrary $\alpha >0$ let $\tilde{\mathbf{b}}_e^1$ be any vector such that $\alpha \tilde{\mathbf{W}}_e^1\mathbf{X}_i + \tilde{\mathbf{b}}_e^1$ is non-negative for all $i$ (note that this is always possible by taking $\tilde{\mathbf{b}}_e^1$ to be a sufficiently large non-negative vector). Note that now if we choose $\tilde{\mathbf{b}}_e^2 = \mathbf{0}$ we then have $\forall i$ and all $\beta >0$ :
|
| 567 |
+
|
| 568 |
+
$$
|
| 569 |
+
\left(\beta \tilde {\mathbf {W}} _ {e} ^ {2}\right) \left(\alpha \tilde {\mathbf {W}} _ {e} ^ {1} \mathbf {X} _ {i} + \tilde {\mathbf {b}} _ {e} ^ {1}\right) _ {+} + \tilde {\mathbf {b}} _ {e} ^ {2} = \left(\beta \tilde {\mathbf {W}} _ {e} ^ {2}\right) \left(\alpha \tilde {\mathbf {W}} _ {e} ^ {1} \mathbf {X} _ {i} + \tilde {\mathbf {b}} _ {e} ^ {1}\right) = \alpha \beta \mathbf {X} _ {i} + \beta \tilde {\mathbf {W}} _ {e} ^ {2} \tilde {\mathbf {b}} _ {e} ^ {1} \tag {54}
|
| 570 |
+
$$
|
| 571 |
+
|
| 572 |
+
where the ReLU function becomes an identity operator due to the fact that we have all non-negative entries. Likewise, we can choose $\tilde{\mathbf{b}}_d^1$ to be an arbitrary vector such that $(\beta^{-1}\tilde{\mathbf{W}}_d^1)(\alpha \beta \mathbf{X}_i + \beta \tilde{\mathbf{W}}_e^2\tilde{\mathbf{b}}_e^1) + \tilde{\mathbf{b}}_d^1$ is non-negative for all $\mathbf{X}_i$ , so if we choose $\tilde{\mathbf{b}}_d^2 = -(\alpha^{-1}\tilde{\mathbf{W}}_d^2)[\tilde{\mathbf{W}}_d^1\tilde{\mathbf{W}}_e^2\tilde{\mathbf{b}}_e^1 +\tilde{\mathbf{b}}_d^1]$ we then have:
|
| 573 |
+
|
| 574 |
+
$$
|
| 575 |
+
\begin{array}{l} \left. \left(\alpha^ {- 1} \tilde {\mathbf {W}} _ {d} ^ {2}\right) \left[ \left(\beta^ {- 1} \tilde {\mathbf {W}} _ {d} ^ {1}\right) \left(\alpha \boldsymbol {\beta} \mathbf {X} _ {i} + \boldsymbol {\beta} \tilde {\mathbf {W}} _ {e} ^ {2} \tilde {\mathbf {b}} _ {e} ^ {1}\right) + \tilde {\mathbf {b}} _ {d} ^ {1} \right] _ {+} + \tilde {\mathbf {b}} _ {d} ^ {2} \right. \\ = \left(\alpha^ {- 1} \tilde {\mathbf {W}} _ {d} ^ {2}\right) \left[ \left(\beta^ {- 1} \tilde {\mathbf {W}} _ {d} ^ {1}\right) \left(\alpha \beta \mathbf {X} _ {i} + \beta \tilde {\mathbf {W}} _ {e} ^ {2} \tilde {\mathbf {b}} _ {e} ^ {1}\right) + \tilde {\mathbf {b}} _ {d} ^ {1} \right] + \tilde {\mathbf {b}} _ {d} ^ {2} \tag {55} \\ = \tilde {\mathbf {W}} _ {d} ^ {2} \tilde {\mathbf {W}} _ {d} ^ {1} \mathbf {X} _ {i} + \left(\alpha^ {- 1} \tilde {\mathbf {W}} _ {d} ^ {2}\right) \left[ \tilde {\mathbf {W}} _ {d} ^ {1} \tilde {\mathbf {W}} _ {e} ^ {2} \tilde {\mathbf {b}} _ {e} ^ {1} + \tilde {\mathbf {b}} _ {d} ^ {1} \right] + \tilde {\mathbf {b}} _ {d} ^ {2} \\ = \mathbf {X} _ {i} \\ \end{array}
|
| 576 |
+
$$
|
| 577 |
+
|
| 578 |
+
So as a result we have constructed a set of encoder/decoder weights which satisfies the third condition of the statement. Further, the embedded points in this construction are of the form
|
| 579 |
+
|
| 580 |
+
$$
|
| 581 |
+
\mathbf {Z} _ {i} = \alpha \beta \mathbf {X} _ {i} + \beta \tilde {\mathbf {W}} _ {e} ^ {2} \tilde {\mathbf {b}} _ {e} ^ {1} \tag {56}
|
| 582 |
+
$$
|
| 583 |
+
|
| 584 |
+
so since we can form such a construction for an arbitrary $\alpha > 0$ and $\beta > 0$ we can choose $\alpha \to 0$ arbitrarily small and $\beta = \tau \| \tilde{\mathbf{W}}_e^2 \tilde{\mathbf{b}}_e^1 \|_F^{-1}$ to give that all the embedded points $\mathbf{Z}_i$ are arbitrarily close to the point $\tau \tilde{\mathbf{W}}_e^2 \tilde{\mathbf{b}}_e^1 \| \tilde{\mathbf{W}}_e^2 \tilde{\mathbf{b}}_e^1 \|_F^{-1}$ , which completes the result.
|
| 585 |
+
|
| 586 |
+
# 6 ADDITIONAL RESULTS AND DETAILS
|
| 587 |
+
|
| 588 |
+
Here we give a few additional results which expand on results given in the main paper along with extra details regarding our experiments.
|
| 589 |
+
|
| 590 |
+
# 6.1 EXPERIMENTS WITH REAL DATA
|
| 591 |
+
|
| 592 |
+
In addition to the results we show in the main paper, we also present additional experimental results on real data. In particular Figure 4 (Left) shows the magnitude of the embedded representation, $\mathbf{Z}$ , using the original code from Ji et al. (2017) to solve model (3) using the YaleB dataset (38 faces). Note that the optimization never reaches a stationary point with the magnitude of the embedded representation continually decreasing (as predicted by Proposition 1). Additionally, if one looks at the singular values (normalized by the largest singular value) for the embedding of data points from one class (Right), then training the autoencoder without the $F(\mathbf{Z},\mathbf{C})$ term results in a geometry that is closer to a linear subspace. Further, the raw data is actually closer to a linear subspace than after training the full SEDSC model (comparing Red and Blue curves). Interestingly, the fact that the autoencoder features and raw data is closer to a linear subspace than SEDSC is also consistent with the clustering performance we show in Table 1, where for the setting without the post-processing the autoencoder-only features achieve the best clustering results, followed by the raw data, followed by SEDSC.
|
| 593 |
+
|
| 594 |
+

|
| 595 |
+
Figure 4: Experiments on Extended Yale B dataset. (Left) The norm of the embedded representation $\mathbf{Z}$ as training proceeds. (Right) The singular values of the embedded representation of points from one class, normalized by the largest singular value. (Raw) The singular values of the raw data. (AE) The singular values of the embedded representation from an autoencoder trained without the $F(\mathbf{Z},\mathbf{C})$ term. (SEDSC) The singular values of the embedded representation from the full SEDSC model (3).
|
| 596 |
+
|
| 597 |
+

|
| 598 |
+
|
| 599 |
+
Details of experiments with real data. We use the code provided by the authors of Ji et al. $(2017)^{3}$ . The code implements the model in (3) with $\theta (\mathbf{C}) = \frac{1}{2}\| \mathbf{C}\| _F^2$ and $\ell (\cdot ,\cdot)$ being the squared loss. The training procedure, as described in Ji et al. (2017), involves pre-training an autoencoder network without the $F(\mathbf{Z},\mathbf{C})$ term. Such pretrained models for each of the three datasets are also provided alongside their code. Then, the encoder and decoder networks of SEDSC are initialized by the pre-trained networks and all model parameters are trained via the Adam optimizer.
|
| 600 |
+
|
| 601 |
+
The implementation details of the three methods reported in Figure 2 and Table 1, namely Raw Data, Autoenc only and Full SEDSC, are as follows. For Raw Data, we solve the model in (1) with $\theta (\mathbf{C}) = \frac{1}{2}\| \mathbf{C}\| _F^2$ and $\lambda$ chosen in the set $\{0.5,1,2,5,10,20,50,100,200,500\}$ that gives the highest averaged clustering accuracy over 10 independent trials. For Autoenc only, we use the pretrained encoder and decoder networks to initialize SEDSC, then freeze the encoder and decoder networks and use Adam to optimize the variable C only. The results for Full SEDSC are generated by running the code as it is. Finally, the same post-processing step is adopted for all three methods (i.e., we do not fine-tune it for Raw Data and Autoenc only).
|
| 602 |
+
|
| 603 |
+
# 6.2 EXPERIMENTS WITH SYNTHETIC DATA
|
| 604 |
+
|
| 605 |
+
In addition to the results shown in the main paper we additionally conduct similar experiments with synthetic data for the Instance Normalization and the Batch/Channel Normalization scheme.
|
| 606 |
+
|
| 607 |
+
Details of experiments with synthetic data. For the Dataset and Batch/Channel normalization experiments we directly add a normalization operator to the encoder network which normalizes the output of the encoder such that the entire dataset has unit Frobenius norm (Dataset Normalization) or each row of the embedded dataset has unit norm (Batch/Channel Normalization) before passing to the self-expressive layer. For the Instance Normalization setting we add the regularization term proposed in Peng et al. (2017) with the form $\gamma_2\sum_{i=1}^N (\mathbf{Z}_i^\top \mathbf{Z}_i - 1)^2$ to the objective in (3). We use regularization hyper-parameters $(\lambda, \gamma) = (10^{-4}, 2)$ for all cases and $\gamma_2 = 10^{-4}$ for the Instance Normalization case.
|
| 608 |
+
|
| 609 |
+
We first pretrain the autoencoder without the $F(\mathbf{Z},\mathbf{C})$ term (i.e., $\gamma = 0$ and $\mathbf{C}$ fixed at $\mathbf{I}$ ), and we initialize the $\mathbf{C}$ matrix to be the solution to (1) using the $\mathbf{Z}$ embedding from the pretrained autoencoder. Following this we perform standard proximal gradient descent (Parikh & Boyd, 2013) on the full dataset, where we take a gradient descent step on all of the model parameters for the full objective excluding the $\theta (\mathbf{C})$ term, then we solve the proximal operator for $\theta (\mathbf{C})$ . Figure 5 shows the results of this experiment, where we plot the original dataset along with the reconstructed output of
|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
Figure 5: Showing results for the synthetic dataset for three normalization schemes (along the rows). Instance Normalization (top); Dataset Normalization (center); Batch/Channel Normalization (bottom). The columns are the same as described in the main paper. (Left) Original data points (Blue) and the data points at the output of the autoencoder when the full model (3) is used (Red). (Center Left) Data representation in the embedded domain when just the autoencoder is trained without the $F(\mathbf{Z}, \mathbf{C})$ term (Blue) and the full model is used (Red). (Center Right) The absolute value of the recovered C encoding matrix when trained with the full model. (Right) Same plot as the previous column but with a logarithmic color scale to visualize small entries.
|
| 613 |
+
|
| 614 |
+
the autoencoder (Left), the embedded representation after pretraining the autoencoder (Left Center-Blue) and after fully training the model (Left Center-Red), the absolute value of the final C matrix (Right Center), and the same plot with a logarithmic color scale to better visualize small entries (Right).
|
| 615 |
+
|
| 616 |
+
From Figure 5 one can see that our theoretical predictions are largely confirmed experimentally. Namely, first examining the Batch and Dataset normalization experiments one sees that when the full SEDSC model is trained the embedded representation is very close to as predicted by Theorem 2, with almost all of the embedded points (Left Center - Red points) being near the origin with the exception of two points, which are co-linear with each other. Likewise, the C matrix is dominated by two non-zero entries with the remaining non-zero entries only appearing on the log-scale color scale. Further, the Instance normalization experiment also largely confirms our theoretical predictions, where all the points are co-linear and largely identical copies of a single point.
|
acritiqueofselfexpressivedeepsubspaceclustering/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2c7b91cd5834331e82cabb41641b071994c9d4cd8365bae13e88f3ac2e9d4029
|
| 3 |
+
size 723124
|
acritiqueofselfexpressivedeepsubspaceclustering/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c6cde6be1222df769f84ca4ea493352896980ddb43085fc6b6d2c162e42fc6c
|
| 3 |
+
size 1027931
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7e2ab80e8999ea7a06a4877ddc7e575119538e6d3a4ce4512bf4e34051e89e4b
|
| 3 |
+
size 72138
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:68319ffc2b41cafcc1c481ee7774f71f719438876dda2558ea12e84e4b9a9d7b
|
| 3 |
+
size 85680
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/eec68046-21c8-474a-9ba0-d6a0431e3a97_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e36804903a5b70e9f2c0bdfc29d90e9ff52831c147e022992b53052e268d1ea2
|
| 3 |
+
size 2094389
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/full.md
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ACTING IN DELAYED ENVIRONMENTS WITH NON-STATIONARY MARKOV POLICIES
|
| 2 |
+
|
| 3 |
+
Esther Derman*
|
| 4 |
+
|
| 5 |
+
Technion
|
| 6 |
+
|
| 7 |
+
estherderman@campus.technion.ac.il
|
| 8 |
+
|
| 9 |
+
Gal Dalal*
|
| 10 |
+
|
| 11 |
+
Nvidia Research
|
| 12 |
+
|
| 13 |
+
gdalal@nvidia.com
|
| 14 |
+
|
| 15 |
+
# Shie Mannor
|
| 16 |
+
|
| 17 |
+
Nvidia Research & Technion
|
| 18 |
+
|
| 19 |
+
shie@ee.technion.ac.il
|
| 20 |
+
|
| 21 |
+
# ABSTRACT
|
| 22 |
+
|
| 23 |
+
The standard Markov Decision Process (MDP) formulation hinges on the assumption that an action is executed immediately after it was chosen. However, this assumption is often unrealistic and can lead to catastrophic failures in applications such as robotic manipulation, cloud computing, and finance. We introduce a framework for learning and planning in MDPs where the decision-maker commits actions that are executed with a delay of $m$ steps. The brute-force state augmentation baseline where the state is concatenated to the last $m$ committed actions suffers from an exponential complexity in $m$ , as we show for policy iteration. We then prove that with execution delay, deterministic Markov policies in the original state-space are sufficient for attaining maximal reward, but need to be non-stationary. As for stationary Markov policies, we show they are sub-optimal in general. Consequently, we devise a non-stationary Q-learning style model-based algorithm that solves delayed execution tasks without resorting to state-augmentation. Experiments on tabular, physical, and Atari domains reveal that it converges quickly to high performance even for substantial delays, while standard approaches that either ignore the delay or rely on state-augmentation struggle or fail due to divergence. The code is available at https://github.com/galdl/rl_delay_basic.git.
|
| 24 |
+
|
| 25 |
+
# 1 INTRODUCTION
|
| 26 |
+
|
| 27 |
+
The body of work on reinforcement learning (RL) and planning problem setups has grown vast in recent decades. Examples for such distinctions are different objectives and constraints, assumptions on access to the model or logged trajectories, on-policy or off-policy paradigms, etc. (Puterman 2014). However, the study of delay in RL remains scarce. It is almost always assumed the action is executed as soon as the agent chooses it. This assumption seldom holds in real-world applications (Dulac-Arnold et al. 2019). Latency in action execution can either stem from the increasing computational complexity of modern systems and related tasks, or the infrastructure itself. The wide range of such applications includes robotic manipulation, cloud computing, financial trading, sensor feedback in autonomous systems, and more. To elaborate, consider an autonomous vehicle required for immediate response to a sudden hazard on the highway. Driving at high speed, it suffers from perception module latency when inferring the surrounding scene, as well as delay in actuation once a decision has been made. While the latter phenomenon is an instance of execution delay, the former corresponds to observation delay. These two types of delay are in fact equivalent and can thus be treated with the same tools (Katsikopoulos & Engelbrecht 2003).
|
| 28 |
+
|
| 29 |
+
Related works. The notion of delay is prominent in control theory with linear time-invariant systems (Bar-Ilan & Sulem 1995; Dugard & Verriest 1998; Richard 2003; Fridman 2014; Bruder & Pham 2009). While the delayed control literature is vast, our work intersects with it mostly in motivation. In the above control theory formulations, the system evolves according to some known diffusion or stochastic differential equation. Differently, the discrete-time MDP framework does not require any structural assumption on the transition function or reward.
|
| 30 |
+
|
| 31 |
+
A few works consider a delay in the reward signal rather than in observation or execution. Delayed reward has been studied on multi-armed bandits for deterministic and stochastic latencies (Joulani et al., 2013) and for the resulting arm credit assignment problem (Pike-Burke et al., 2017). In the MDP setting, Campbell et al. (2016) proposed a Q-learning variant for reward-delay that follows a Poisson distribution. Katsikopoulos & Engelbrecht (2003) considered three types of delay: observation, execution, and reward. Chen et al. (2020b) studied execution delay on multi-agent systems. The above works on MDPs employed state-augmentation with a primary focus on empirical evaluation of the degradation introduced by the delay. In this augmentation method, all missing information is concatenated with the original state to overcome the partial observability induced by the delay. The main drawback of this embedding method is the exponential growth of the state-space with the delay value (Walsh et al., 2009; Chen et al., 2020a) and, in the case of (Chen et al., 2020b), an additional growth that is polynomial with the number of agents.
|
| 32 |
+
|
| 33 |
+
Walsh et al. (2009) avoided state-augmentation in MDPs with delayed feedback via a planning approach. By assuming the transition kernel to be close to deterministic, their model-based simulation (MBS) algorithm relies on a most-likely present state estimate. Since the Delayed-Q algorithm we devise here resembles MBS in spirit, we highlight crucial differences between them: First, MBS is a conceptual algorithm that requires the state-space to be finite or discretized. This makes it highly sensitive to the state-space size, as we shall demonstrate in Fig. 5(c), prohibiting it from running on domains like Atari. Differently, Delayed-Q works with the original, possibly continuous state-space. Second, MBS is an offline algorithm: it estimates a surrogate, non-delayed MDP from samples, and only then does it solve that MDP to obtain the optimal policy (Walsh et al., 2009) [Alg. 2, 1.16]. This is inapplicable to large continuous domains and is again in contrast to Delayed-Q.
|
| 34 |
+
|
| 35 |
+
Recent studies considered a concurrent control setting where action sampling occurs simultaneously with state transition (Ramstedt & Pal, 2019; Xiao et al., 2020). Both assumed a single action selection between two consecutive observations, thus reducing the problem to an MDP with execution delay of $m = 1$ . Chen et al. (2020a) have generalized it to an arbitrary number of actions between two observations. Hester & Stone (2013) addressed execution delay in the braking control of autonomous vehicles with a relatively low delay of $m \leqslant 3$ . All these works employ state-augmentation to preserve the Markov property of the process, whereas we are interested whether this restriction can be lifted. Additionally, they studied policy-gradient (policy-based) methods, while we introduce a Q-learning style (value-based) algorithm. Likewise, Firoiu et al. (2018) proposed a modified version of the policy-based IMPALA (Espeholt et al., 2018) which is evaluated on a single video game with delay values of $m \leqslant 7$ . To the best of our knowledge, our work is the first to tackle a delayed variant of the popular Atari suite (Bellemare et al., 2013).
|
| 36 |
+
|
| 37 |
+
Contributions. Revisiting RL with execution delay both in theory and practice, we introduce:
|
| 38 |
+
|
| 39 |
+
1. Analysis of a delayed MDP quantifying the trade-off between stochasticity and delay.
|
| 40 |
+
2. The first tight upper and lower complexity bounds on policy iteration for action-augmented MDPs. We stress that this is also a contribution to general RL theory of non-delayed MDPs.
|
| 41 |
+
3. A new formalism of execution-delay MDPs that avoids action-embedding. Using it, we prove that out of the larger set of history-dependent policies, restricting to non-stationary deterministic Markov policies is sufficient for optimality in delayed MDPs. We also derive a Bellman-type recursion for a delayed value function.
|
| 42 |
+
4. A model-based DQN-style algorithm that yields non-stationary Markov policies. Our algorithm outperforms the alternative standard and state-augmented DDQN in 39 of 42 experiments spanning over 3 environment categories and delay of up to $m = 25$ .
|
| 43 |
+
|
| 44 |
+
# 2 PRELIMINARIES: NON-DELAYED STANDARD MDP
|
| 45 |
+
|
| 46 |
+
Here, we describe the standard non-delayed MDP setup. Later, in Sec. 5, we introduce its generalization to the delayed case. We follow and extend notations from (Puterman, 2014)[Sec. 2.1.]. An infinite horizon discounted MDP is a tuple $(S, \mathcal{A}, P, r, \gamma)$ where $S$ and $\mathcal{A}$ are finite state and action spaces, $P: S \times \mathcal{A} \to \Delta_S$ is a transition kernel, the reward $r: S \times \mathcal{A} \to \mathbb{R}$ is a bounded function, and $\gamma \in [0,1)$ is a discount factor. At time $t$ , the agent is in $s_t$ and draws an action $a_t$ according to a decision rule $d_t$ that maps past information to a probability distribution $q_{d_t}$ over the action set. Once $a_t$ is taken, the agent receives a reward $r(s_t, a_t)$ .
|
| 47 |
+
|
| 48 |
+
A decision rule can be history-dependent (H) or Markovian (M), and randomized (R) or deterministic (D). Denote by $\mathcal{H}_t$ the set of possible histories up to time $t$ . Then, a history-dependent decision-rule is given by $d_t: \mathcal{H}_t \to \Delta_A$ with $h_t \mapsto q_{d_t(h_t)}(\cdot)$ . A Markovian decision-rule, on the other hand, maps states to actions, i.e., $d_t: S \to \Delta_A$ with $s \mapsto q_{d_t(s)}(\cdot)$ . A policy $\pi := (d_t)_{t \geqslant 0}$ is a sequence of decision rules whose type dictates that of the policy. It can be either Markovian deterministic ( $\Pi^{\mathrm{MD}}$ ) or randomized ( $\Pi^{\mathrm{MR}}$ ), history-dependent deterministic ( $\Pi^{\mathrm{HD}}$ ) or randomized ( $\Pi^{\mathrm{HR}}$ ). It is stationary if its decision rules do not depend on time, i.e., $d_t = d$ for all $t \geqslant 0$ . This defines the smaller class of stationary policies: deterministic ( $\Pi^{\mathrm{SD}}$ ) and randomized ( $\Pi^{\mathrm{SR}}$ ). Note that stationary policies are inherently Markovian. Indeed, at time $t = 0$ , $d: \mathcal{H}_0 \to \Delta_A$ is state-dependent because $\mathcal{H}_0 = S$ . Since the policy is stationary, i.e., $d_t = d \forall t$ , subsequent decision rules are also state-dependent, thus Markovian. This makes $\Pi^{\mathrm{HR}}$ the most general set and $\Pi^{\mathrm{SD}}$ the most specific.
|
| 49 |
+
|
| 50 |
+
We denote probability model by $\mathbb{P}_0^\pi$ , where the subscript 0 stands for the delay value $m = 0$ . The related random variables are denoted by $\tilde{s}_t \in S$ , $\tilde{a}_t \in \mathcal{A}$ and $\tilde{h}_t \in (S \times \mathcal{A})^t \times S$ . The value function given policy $\pi \in \Pi^{\mathrm{HR}}$ is defined as $v^\pi(s) = \mathbb{E}_0^\pi \left[ \sum_{t=0}^\infty \gamma^t r(\tilde{s}_t, \tilde{a}_t) \Bigg| \tilde{s}_0 = s \right]$ , where the expectation is taken with respect to (w.r.t.) $\mathbb{P}_0^\pi(\cdot | \tilde{s}_0 = s)$ . Let the optimal value function
|
| 51 |
+
|
| 52 |
+
$$
|
| 53 |
+
v ^ {*} (s) := \max _ {\pi \in \Pi^ {\mathrm {H R}}} v ^ {\pi} (s), \quad \forall s \in \mathcal {S}. \tag {1}
|
| 54 |
+
$$
|
| 55 |
+
|
| 56 |
+
Our goal is to find a policy $\pi^{*}$ that yields $v^{*}$ , and it is known that focusing on stationary deterministic policies $\pi \in \Pi^{\mathrm{SD}}$ is sufficient for reaching the optimum in (1) [Puterman, 2014] [Thm. 6.2.10].
|
| 57 |
+
|
| 58 |
+
# 3 MDPS WITH DELAY: A DEGRADATION EXAMPLE
|
| 59 |
+
|
| 60 |
+
In an MDP with execution delay $m$ , any action chosen at time $t$ is executed at $t + m$ . Therefore, at each step, the agent witnesses the current state and action being executed, but selects a new action that will be applied in a future state. We assume that $m$ decided actions are already awaiting execution at $t = 0$ , so at any given time, the queue of pending actions is of constant length $m$ . As we illustrate in the next example, having a delay generally comes at a price.
|
| 61 |
+
|
| 62 |
+
Example 3.1 (Two-state MDP). Consider the MDP in Fig. 1. It has two states and two actions: $\mathcal{S} = \{s_0, s_1\}$ , $\mathcal{A} = \{a_0, a_1\}$ . The transition kernel is independent of the action: for all $s, s' \in \mathcal{S}$ s.t. $s \neq s'$ , $P(s'|s, a) = P(s'|s) = p$ where $p \in [0.5, 1]$ . The reward is positive for one of the two actions only: $r(s_0, a_0) = r(s_1, a_1) = 1$ , $r(s_0, a_1) = r(s_1, a_0) = 0$ .
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
Figure 1: Degradation due to delay in a two-state MDP.
|
| 66 |
+
|
| 67 |
+
We inspect the return obtained from the commonly used set of stationary deterministic policies $\Pi^{\mathrm{SD}}$ . As expected, the highest possible return is attained when $m = 0$ , but monotonically decreases with the delay, $m$ , and increases with the level of certainty, $p$ . We analytically quantify this effect in the following and give a proof in Appx. A.1
|
| 68 |
+
|
| 69 |
+
Proposition 3.1. For delay $m \in \mathbb{N}$ and $p \in [0.5, 1]$ , the optimal return of $\pi^* \in \Pi^{\mathrm{SD}}$ is $\frac{1 + (2p - 1)^m}{2(1 - \gamma)}$ .
|
| 70 |
+
|
| 71 |
+
Remark 3.1. This result demonstrates a clear tradeoff between stochasticity and delay. For $p \to 0.5$ or $m \to \infty$ , the return goes to its minimal value of $0.5 / (1 - \gamma)$ . Contrarily, for $p \to 1$ or $m \to 0$ , it goes to its maximal value of $1 / (1 - \gamma)$ .
|
| 72 |
+
|
| 73 |
+
# 4 THE AUGMENTATION APPROACH
|
| 74 |
+
|
| 75 |
+
In this section, we consider state-augmentation for solving MDPs with execution delay. We begin with defining an equivalent MDP with a larger state space that memorizes all missing information for an informed decision. Due to the full observability, the resulting optimal augmented policy attains the optimal return in the original delayed MDP.
|
| 76 |
+
|
| 77 |
+
Definition 4.1 (m-AMDP). Given MDP $(S, \mathcal{A}, P, r, \gamma)$ and $m \in \mathbb{N}$ , an m-Augmented MDP (m-AMDP) is a tuple $(\mathcal{X}_m, \mathcal{A}, F, g, \gamma)$ such that $\mathcal{X}_m := S \times \mathcal{A}^m$ is the augmented state-space, $\mathcal{A}$ the original action-space, $F$ is the transition matrix given in Appx. B.1 (14), and $g$ is the reward function given in Appx. B.1 (15).
|
| 78 |
+
|
| 79 |
+
The pending action queue is concatenated to the original state to form an augmented state $x_{t} \coloneqq (s_{t}, a_{t}^{-1}, \dots, a_{t}^{-m}) \in \mathcal{X}_{m}$ , where $a_{t}^{-i}$ is the $i$ -th pending action at time $t$ . It means that in the following step, $t + 1$ , action $a_{t}^{-m}$ will be executed independently of the present action selection, the queue will shift to the right, and the newly selected action will be at the second coordinate. By construction, the $m$ -AMDP is non-delayed; it directly accounts for execution delay through its state-representation, as opposed to our coming formulation in Sec. 5. We further define a stationary deterministic policy $\bar{\pi} \in \bar{\Pi}_{m}^{\mathrm{SD}}$ with corresponding decision rule $\bar{d}: \mathcal{X}_{m} \to \Delta_{\mathcal{A}}$ and augmented value function $v^{\bar{\pi}}(x) \coloneqq \mathbb{E}^{\bar{\pi}}\left[\sum_{t=0}^{\infty} \gamma^{t} g(\tilde{x}_{t}, \tilde{a}_{t}) |\tilde{x}_{0} = x\right]$ . As in (1), our goal is to solve $\bar{v}^{*}(x) = \max_{\bar{\pi} \in \bar{\Pi}_{m}^{\mathrm{SD}}} v^{\bar{\pi}}(x), \quad \forall x \in \mathcal{X}_{m}$ .
|
| 80 |
+
|
| 81 |
+
We now analyze the classical Policy Iteration (PI) algorithm (Howard, 1960) for $m$ -augmented MDPs and provide a finite-time analysis of its convergence. We refer to it as $mA$ -PI and provide its pseudo-code in Appx.B.2. We consider PI since it is a canonical representative upon which many other algorithms are built. Admittedly, we did not find any other formal result quantifying the effect of augmentation on a planning or learning algorithm, other than a PAC upper bound for R-max with $\epsilon$ -optimal policies (Walsh et al., 2009). A proof for the next result is given in Appx.B.4.
|
| 82 |
+
|
| 83 |
+
Theorem 4.1 (Lower Bound for mA-PI). The number of iterations required for mA-PI to converge in $m$ -AMDP $\mathcal{M}_m$ is $\Omega(|\mathcal{X}_m|) = \Omega(|\mathcal{S}||\mathcal{A}|^m)$ .
|
| 84 |
+
|
| 85 |
+
Thm. 4.1 does not take advantage of the special delay problem structure but rather is an application of our more general result to augmented MDPs (Appx.B.4). As pointed out in Scherrer et al. (2016), the lower-bound complexity of PI is considered an open problem, at least in the most general MDP formulation. Lower-bounds have been derived in specific cases only, such as deterministic MDPs (Hansen & Zwick, 2010), total reward criterion (Fearnley, 2010) or high discount factor (Hollanders et al., 2012). Even though we did not intend to directly address this open question, our lower bound result seems to be a contribution on its own to the general theory of non-delayed MDPs.
|
| 86 |
+
|
| 87 |
+
Next, we show that the above lower bound is tight (up to a factor of $|\mathcal{A}|$ and logarithmic terms) and mA-PI is guaranteed to converge after $\tilde{O}(|\mathcal{S}||\mathcal{A}|^{m+1})$ . A proof is given in Appx. B.5
|
| 88 |
+
|
| 89 |
+
Theorem 4.2 (mA-PI Convergence). The mA-PI algorithm converges to the optimal value-policy pair $(\bar{v}^{*},\bar{\pi}^{*})$ in at most $|\mathcal{S}||\mathcal{A}|^m (|\mathcal{A}| - 1)\left\lceil \log \left(1 / (1 - \gamma)\right) / \log \left(1 / \gamma\right)\right\rceil$ iterations.
|
| 90 |
+
|
| 91 |
+
# 5 EXECUTION-DELAY MDP: A NEW FORMULATION
|
| 92 |
+
|
| 93 |
+
In this section, we introduce and study the stochastic process generated by an MDP with execution delay, without resorting to state-augmentation. In the ED-MDP we consider, the probability measure changes according to the delay value $m$ . We assume that during the $m$ initial steps, actions are sequentially executed according to a fixed queue $\bar{a} := (\bar{a}_0, \dots, \bar{a}_{m-1}) \in \mathcal{A}^m$ . Unlike $m$ -AMDPs, the initial queue of pending actions here plays the role of an exogenous variable that is not embedded into the state-space. A policy $\pi \in \Pi^{\mathrm{HR}}$ induces a probability measure $\mathbb{P}_m^\pi$ that is defined through a set of equations which, for brevity, we defer to Appx. C, (16)–(19). We note that for $t < m$ , decision rules do not depend on the history, while for $t \geqslant m$ , they depend on the history up to $t - m$ only. Let $\mu$ be an initial state distribution and $\delta$ a Dirac distribution. Using this and the notations from Sec. 2 we can explicitly write the probability of a sample path. See proof in Appx. C.1.
|
| 94 |
+
|
| 95 |
+
Proposition 5.1. For policy $\pi \coloneqq (d_0, d_1, \dots) \in \Pi^{\mathrm{HR}}$ , the probability of observing history $h_t \coloneqq (s_0, a_0, s_1, a_1 \dots, a_{t-1}, s_t)$ is given by:
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\begin{array}{l} \mathbb {P} _ {m} ^ {\pi} \left(\tilde {s} _ {0} = s _ {0}, \tilde {a} _ {0} = a _ {0}, \tilde {s} _ {1} = s _ {1}, \tilde {a} _ {1} = a _ {1}, \dots , \tilde {a} _ {t - 1} = a _ {t - 1}, \tilde {s} _ {t} = s _ {t}\right) \\ = \mu (s _ {0}) \left(\prod_ {k = 0} ^ {m - 1} \delta_ {\tilde {a} _ {k}} (a _ {k}) p (s _ {k + 1} | s _ {k}, a _ {k})\right) \left(\prod_ {k = m} ^ {t - 1} q _ {d _ {k - m} (h _ {k - m})} (a _ {k}) p (s _ {k + 1} | s _ {k}, a _ {k})\right). \\ \end{array}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
From Prop. 5.1 we deduce that, differently than the standard MDP setting where any Markov policy induces a Markov process, the delayed process is not Markovian even for stationary policies (see
|
| 102 |
+
|
| 103 |
+
Appx. C.2 for a formal proof). Next, we show that for any history-dependent policy and starting state, there exists a Markov policy (not necessarily stationary) that generates the same process distribution. Consequently, despite execution delay, one can restrict attention to Markov policies without impairing performance.
|
| 104 |
+
|
| 105 |
+
Theorem 5.1. Let $\pi \in \Pi^{\mathrm{HR}}$ be a history dependent policy. For all $s_0 \in S$ , there exists a Markov policy $\pi' \in \Pi^{\mathrm{MR}}$ that yields the same process distribution as $\pi$ , i.e., $\mathbb{P}_m^{\pi'}(\tilde{s}_{t-m} = s', \tilde{a}_t = a | \tilde{s}_0 = s_0) = \mathbb{P}_m^\pi(\tilde{s}_{t-m} = s', \tilde{a}_t = a | \tilde{s}_0 = s_0)$ , $\forall a \in A, s' \in S, t \geqslant m$ .
|
| 106 |
+
|
| 107 |
+
The proof is given in Appx. C.3. It builds on the concept that for each history-dependent policy $\pi \in \Pi^{\mathrm{HR}}$ , one can choose a sequence of Markov decision rules that reconstruct the same time-dependent action distribution in the process induced by $\pi$ .
|
| 108 |
+
|
| 109 |
+
This result proves attainability of the optimum over $\Pi^{\mathrm{MR}}$ , but not how one can efficiently find an optimal policy. In Appx. C.5, Eq. (27), we formally define the delayed value function $v_{m}^{\mu_{0}:\mu_{m-1},\pi}$ for policy $\pi$ and initial action distribution queue $\mu_{0}:\mu_{m-1}:= (\mu_{0},\ldots,\mu_{m-1})$ . In Thm. C.1 there, we show that it satisfies a non-stationary Bellman-type recursion. Though the question of how to efficiently find an optimal non-stationary Markov policy remains generally open, we partially answer it by proving that a deterministic Markov policy is sufficient for the optimal delayed value function.
|
| 110 |
+
|
| 111 |
+
Theorem 5.2. For any action distribution queue $\mu_0:\mu_{m - 1}\coloneqq (\mu_0,\ldots ,\mu_{m - 1})$ and $s_0\in S$
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\max _ {\pi \in \Pi^ {\mathrm {M D}}} v _ {m} ^ {\mu_ {0}: \mu_ {m - 1}, \pi} = \max _ {\pi \in \Pi^ {\mathrm {M R}}} v _ {m} ^ {\mu_ {0}: \mu_ {m - 1}, \pi}.
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
Degradation due to stationarity. To complement the finding that a deterministic Markov policy can be optimal for any ED-MDP, we show that restricting to stationary policies impairs performance in general. Thus, while in non-delayed MDPs it is enough to focus on the latter, in ED-MDPs the restriction should be to the more general class of Markov policies.
|
| 118 |
+
|
| 119 |
+
Proposition 5.2. There exists an $m$ -ED-MDP for which all stationary policies are sub-optimal.
|
| 120 |
+
|
| 121 |
+
This result follows from computing the optimal return for stationary and non-stationary policies in the ED-MDP from Example 3.1 using simulation. We elaborate on this further in Appx. C.4. There, we also confirm that our theoretical return from Prop. 3.1 matches closely with simulation. Lastly, a visualization of the results from this section is given in Fig. 2
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
Figure 2: Optimality of policy types in ED-MDPs: Markovness is sufficient but nonstationarity is necessary.
|
| 125 |
+
|
| 126 |
+
# 6 A NEW ALGORITHM: DELAYED-Q
|
| 127 |
+
|
| 128 |
+
We now introduce an algorithm capable of successfully handling tasks with execution delay by inferring the future $m$ -step state before each decision.
|
| 129 |
+
|
| 130 |
+
Algorithm Description. Fig. 3 depicts the algorithm. As a first stage, to select an action $a_{t}$ to be executed in a future state $s_{t + m}$ , we infer that future state $\hat{s}_{t + m}$ using the current state $s_t$ and the queue of pending actions $(a_{t - m},\dots ,a_{t - 1})$ . This is done by successively applying an approximate forward model $m$ times: $\hat{s}_{t + 1} = f(s_t,a_{t - m}),\ldots ,\hat{s}_{t + m} = f(\hat{s}_{t + m - 1},a_{t - 1})$ . More details on the forward models are given in Sec. 7. The approximate model here is simpler than other model-based algorithms such as tree-search
|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
Figure 3: Delayed-Q algorithm diagram.
|
| 134 |
+
|
| 135 |
+
methods, because it does not require access to the reward function. Also, only a single trajectory is
|
| 136 |
+
|
| 137 |
+
sampled rather than exponentially many w.r.t. the horizon length. We do note this method benefits from the environment not being entirely stochastic (Walsh et al. 2009). Still, as we show next, it performs well even on noisy environments. As a second stage, we select an action according to a policy $a_{t} = \pi (\hat{s}_{t + m})$ . The two stages of this procedure can be represented as a non-stationary Markov policy $\pi_t(s_t)$ , where the non-stationarity stems from the time-dependency of the action queue, and the Markov property from the policy being applied on $s_t$ and no prior history. Notably, the Q-function here does not take past actions as input, contrarily to the augmentation approach in Sec. 4. To better stress the non-stationarity, we note that applying the policy on the same state at different times can output different actions. Lastly, for training, we maintain a sample-buffer of length $m$ which we use to shift action $a_{t}$ into the tuple $(s_{t + m},r_{t + m},a_{t},s_{t + m + 1})$ prior to each insertion to the replay buffer. During the course of this work, we also experimented with a model-free variant. Instead of 'un-delaying' the Q-function with the forward-model, we defined a delayed Q-function trained on sequences whose actions were shifted $m$ steps forward. However, the obtained results were unsatisfactory, seemingly because the Q-function is unable to implicitly learn the $m$ -step transition.
|
| 138 |
+
|
| 139 |
+
Point-Estimate Approaches. For completeness, we mention alternatives to using a 'most-likely' state estimate, such as an expected future state. To demonstrate why point-estimate prediction can be devastating, consider an MDP where $s = (x,t)$ : position and time, respectively. Starting from $s_0 = (0,0)$ , $t$ progresses deterministically, while $x$ behaves like a random walk with momentum; i.e., if $x > 0$ , then $x + 1$ is more likely than $x - 1$ , and vice versa. The process obviously diverges with time. Consider two actions: one is good when $|x|$ is big, and the other when $|x|$ is small. For a large delay $m$ , the PDF of the state is bi-modal and symmetric around $(Z,m)$ and $(-Z,m)$ for some finite $Z$ . But, a point estimate (e.g., ML or MAP) would yield a value of $(0,m)$ . In addition to this example, we observe that in our Ex. 3.1 any alternative to a 'most-likely' state estimate is worse: there, the optimal policy applies actions based on the most-likely state (see proof of Prop. 3.1), while it is easy to see that any other policy weighing future state probabilities leads to lower reward.
|
| 140 |
+
|
| 141 |
+
# 7 EXPERIMENTS
|
| 142 |
+
|
| 143 |
+
We perform experiments in a wide range of domains: tabular, physical, and image-based Atari. All of them include stochasticity: In the maze we inject noise to actions; in the physical domains we perturb the masses at each step; and Atari is stochastic by nature. We compare our algorithm with two baselines: Oblivious-Q and Augmented-Q. Oblivious-Q is the standard Q-learning that ignores delay and assumes each decision to be immediately executed. Augmented-Q acts on the $m$ -AMDP introduced in Def. 4.1. We test all domains on delays $m \in \{0,5,15,25\}$ with 5 seeds per each run. All results are summarized in Fig. 10 and are provided in more detail with std. in Appx. D.1, Table 2.
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
Figure 4: Maze: Time complexity as a function of $m$
|
| 147 |
+
|
| 148 |
+
Tabular Maze Domain. We begin with testing Delayed-Q on a Maze domain (Brockman et al., 2016)[tinyurl.com/y34tmfm9]. It is based on tabular Q-learning and enables us to study the merits of our method decoupled from the coming DDQN added complexities. Moreover, it conveys the exponential complexity of Augmented-Q. The forward-model we construct is naturally tabular as well: it predicts a state $s'$ according to the highest visitation frequency given $(s, a)$ . The objective in Maze is to find the shortest path from a start position to a goal state in a randomly-generated $N \times N$ maze. Reaching the goal yields a reward of 1, and $-1/(10N^2)$ per step otherwise. The maximal episode length is $10N^2$ steps, so the cumulative reward is in $[-1, 1]$ . We also create a Noisy Maze environment that perturbs each action w.p. $p \in [0, 0.5]$ .
|
| 149 |
+
|
| 150 |
+
Convergence plots are given in Fig. 6. Delayed-Q outperforms the rest for all delay values $m$ , while Oblivious-Q fails in all runs for $m > 0$ . Since the augmented state-space grows exponentially with $m$ , Augmented-Q converges more slowly as $m$ increases. In fact, for $m > 15$ the simulation fails to run due to memory incapacity for the Q-table; this explains its absence in Figs. 6 and 10. To confirm the exponential complexity growth of Augmented-Q and compare it with Delayed-Q, we trained both agents with increasing delay values, and reported the number of training episodes each one required before reaching a cumulative reward of 0.5. Fig. 4 clearly demonstrates the exponential (resp. linear) dependence of Augmented-Q (resp. Delayed-Q) in the delay value. The linear dependence of Delayed-Q in $m$ is not surprising: Delayed-Q is algorithmically identical to Q-learning, except
|
| 151 |
+
|
| 152 |
+
for the $m$ -step forward-model calls and the replay buffer shift of $m$ samples. To further analyze its sensitivity to the state-space size, we ran tabular Delayed-Q on increasing maze sizes, for a fixed $m = 5$ . As Fig. 5(c) shows, the performance drops exponentially, suggesting high sensitivity to the state-space size and highlighting one shortcoming of MBS (Walsh et al., 2009) (see Sec. I).
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
(a)
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
(b)
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
(c)
|
| 162 |
+
|
| 163 |
+

|
| 164 |
+
Figure 5: Delayed-Q (median over 5 seeds): (a) Total reward after 5000 training episodes on $10 \times 10$ Maze. Performance is sensitive to both delay value and stochasticity. (b) Noisy Cartpole. (c) Reward on varying Maze sizes. Abscissa is in log-scale, so the return decreases exponentially with $m$ .
|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
Figure 6: Convergence plots for Maze, Noisy Cartpole and Atari MsPacman. Note that the scale of the y-axes (performance) may change from figure to figure.
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
|
| 173 |
+

|
| 174 |
+
|
| 175 |
+

|
| 176 |
+
|
| 177 |
+

|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
|
| 189 |
+
Physical Domains. Next, we test our approach on two continuous domains: CartPole and Acrobot. The CartPole task requires balancing a pole connected to a cart that actuates left or right. In Acrobot, one needs to swing up the lower of two links connected by a joint above a certain height. The agent receives a reward of 1 if the pole stays above a certain angle in Cartpole, and in Acrobot it receives $-1$ until it reaches the goal. The episode length is 500 steps in both tasks. We also create noisy versions of both tasks: At each step, normal additive noises are independently added to each physical component's mass, with std of 0.1 of the nominal mass.
|
| 190 |
+
|
| 191 |
+
We extend the famous DDQN algorithm (Van Hasselt et al., 2015) and compare to it, though our method is general and can be seamlessly integrated into any Q-learning based algorithm. Our one-step forward-model is implemented with a neural network (NN) of the same architecture as the Q-network. Namely, it consists of two hidden layers, each of width 24, with ReLu activations. The input of the
|
| 192 |
+
|
| 193 |
+

|
| 194 |
+
Figure 7: Performance as a function of the delay (from left to right): Maze, Noisy Cartpole, Noisy Acrobot. For Augmented-Q in Maze, $m > 10$ is missing due to explosion of the state-space.
|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
|
| 198 |
+

|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
Figure 9: Noisy Cartpole: Performance gap between true and trained forward model.
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
|
| 209 |
+
forward-model NN is the concatenation of $(s, a)$ and its output is $s'$ . Training the forward-model NN is conducted together with the Q-network training with the same hyperparameters and sample batches; this makes the implementation easy and simple. For Augmented-Q, a concatenation of the pending actions to the state is fed to the Q-network.
|
| 210 |
+
|
| 211 |
+
Fig. 6 depicts the performance of the three algorithms for different values of $m$ for Noisy Cartpole. As expected from a physical domain, ignoring delay gives catastrophic results even for $m = 5$ . Augmented-Q performs moderately up to $m = 15$ , but fails for larger delays. Delayed-Q performs the best for all $m$ values, and performs well even on the challenging task of balancing a noisy pole with $m = 25$ . We observe similar behavior in all Cartpole and Acrobot experiments, as shown in Fig. 10. Moreover, in Fig. 7, we demonstrate the relative robustness of Delayed-Q to different delay values. All tested environments exhibit superior performance of Delayed-Q for a wide range of delays. In Noisy Acrobot, Delayed-Q performs better for $m = 25$ than the alternatives do for $m = 2$ . Figs. 5(a)–5(b) show a clear trade-off between noise and delay, as we also discuss in the remark above. For high delays, the agent is much more sensitive to an increase in stochasticity.
|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
Figure 8: Performance gap for Delayed-Q trained with a delay of $m = 10$ .
|
| 215 |
+
|
| 216 |
+
To quantify the dependence of Delayed-Q on the model accuracy, we compare the learned model to a perfect one, i.e., the environment itself. Fig. 9 shows performance is impaired more as the delay increases and suggests a better model can potentially improve reward by $20 - 30\%$ . Further, we test the robustness of Delayed-Q to misspecified delay by training it with $m = 10$ and evaluating on other delay values. Fig. 8 shows the evaluation performance for $m \in \{5, \dots, 15\}$ . It demonstrates the robustness of our method – varying performance in evaluation (for good or bad) does not stem from delay misspecification. Instead, the delay is 'forgotten' after training, and Fig. 8 depicts the general effect of execution delay on performance. For shorter delay than the training one, i.e., $m < 10$ , performance even improves. The reason is that, first, during training, the Q-function is 'un-delayed' due to the replay buffer shift that relates the actions to the correct execution time. Second, the forward-model is trained based on single-step transitions and only during inference is it queried $m$ times. Thus, these two networks composing the agent are oblivious to the delay they were trained on.
|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
Figure 10: Experiment summary: mean of episodic return for all domains. Delayed-Q outperforms the alternatives in 39 of 42 experiments. Due to negative reward, a positive translation of 1 is applied for Maze and 500 for Acrobot. Atari x-axis is the gain relative to lowest result in each experiment.
|
| 220 |
+
|
| 221 |
+

|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
|
| 225 |
+
Atari Domains. We run the last set of experiments on the Atari Learning Environment (Bellemare et al., 2013). We inspect 8 games from those that were successfully tackled with the original Q-network architecture and hyperparameters of DDQN (Van Hasselt et al., 2015). Since a learned forward-model for images conditioned on actions is a hanging question in the research frontier, we leave it for future work and use the simulator itself for prediction. It is stochastic in nature and thus encompasses approximation error. For Augmented-Q, we concatenate the action queue to the output of the CNN part of the Q-network; the extended vector is then fed into the subsequent fully-connected part of it. We train all games for 1M steps. Fig. 6 shows convergence plots for MsPacman. Delayed-Q is consistently better than Augmented-Q for all $m$ values, which is, in turn, better than Oblivious-Q. Although the gap between all three algorithms is small for $m = 5$ , it increases with $m$ . For $m = 25$ , the delay is too large for the augmentation to have a positive effect compared to Oblivious-Q, and they perform the same. This behavior is representative of all Atari games, as can be seen in Fig. 10. Lastly, we compared Delayed-Q with a fourth algorithm which uses an RNN policy that is unaware of the delay value. The results are given in Appx. D.2 showing that a recurrent policy does not improve upon Augmented-Q or Oblivious-Q. This result is not surprising though: as stated in Thm. 5.1, the history sequence $s_{t - m}, s_{t - m - 1}, \ldots$ does not aid the policy any further than only using $s_{t - m}$ .
|
| 226 |
+
|
| 227 |
+
# 8 DISCUSSION
|
| 228 |
+
|
| 229 |
+
In this work, we found that non-stationary deterministic Markov policies are optimal in delayed MDPs. Though more expressive, the standard state augmentation approach is intractable for all but the shortest delays, while the oblivious approach that ignores delay suffers from inferior performance. We derived a Q-learning based algorithm that generates a Markov policy by combining a transition forward model with Q-network. The forward-model produces a simple future-state estimate. Incorporating probabilistic estimates and other improvements such as integration of image-based action-dependent learned forward-models (Kim et al., 2020), are left for future research. Extensions of our work for real-world applications can be unknown or varying delay. In the first case, a good prior for the delay value can often be used, e. g., for autonomous vehicles, as the latency statistics of the different hardware and software components are well studied (Zhao et al., 2019; Niu et al., 2019), while in production systems, they are almost constant (Toschi et al., 2019). Our algorithm is also readily extendable to the second case of varying delay. Differently from the augmentation approach, our 1-step forward-model decouples the algorithm from the delay used for training, as Fig. 8 depicts. Also, quantization of the delay is not essential as long as the forward model can operate with variable delay values. Finally, our framework can be extended to policy-gradient-based methods that are particularly useful for continuous control, where observation delay is inherent.
|
| 230 |
+
|
| 231 |
+
# ACKNOWLEDGEMENTS
|
| 232 |
+
|
| 233 |
+
The authors would like to thank Daniel J. Mankowitz and Timothy A. Mann for motivating this work.
|
| 234 |
+
|
| 235 |
+
# REFERENCES
|
| 236 |
+
|
| 237 |
+
Avner Bar-Ilan and Agnès Sulem. Explicit solution of inventory problems with delivery lags. Mathematics of Operations Research, 20(3):709-720, 1995.
|
| 238 |
+
Marc G Bellemare, Yavar Naddaf, Joel Veness, and Michael Bowling. The arcade learning environment: An evaluation platform for general agents. Journal of Artificial Intelligence Research, 47: 253-279, 2013.
|
| 239 |
+
Dimitri P Bertsekas. Dynamic programming and optimal control, volume 1. Athena Scientific, Belmont, MA, 1995.
|
| 240 |
+
Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540v1, 2016.
|
| 241 |
+
Benjamin Bruder and Huyen Pham. Impulse control problem on finite horizon with execution delay. Stochastic Processes and their Applications, 119(5):1436-1469, 2009.
|
| 242 |
+
Jeffrey S Campbell, Sidney N Givigi, and Howard M Schwartz. Multiple model q-learning for stochastic asynchronous rewards. Journal of Intelligent & Robotic Systems, 81(3-4):407-422, 2016.
|
| 243 |
+
Baiming Chen, Mengdi Xu, Liang Li, and Ding Zhao. Delay-aware model-based reinforcement learning for continuous control. arXiv preprint arXiv:2005.05440, 2020a.
|
| 244 |
+
Baiming Chen, Mengdi Xu, Zuxin Liu, Liang Li, and Ding Zhao. Delay-aware multi-agent reinforcement learning. arXiv preprint arXiv:2005.05441, 2020b.
|
| 245 |
+
Luc Dugard and Erik I Verriest. Stability and control of time-delay systems, volume 228. Springer, 1998.
|
| 246 |
+
Gabriel Dulac-Arnold, Daniel Mankowitz, and Todd Hester. Challenges of real-world reinforcement learning. arXiv preprint arXiv:1904.12901, 2019.
|
| 247 |
+
Lasse Espeholt, Hubert Soyer, Remi Munos, Karen Simonyan, Volodymir Mnih, Tom Ward, Yotam Doron, Vlad Firoiu, Tim Harley, Iain Dunning, et al. Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures. arXiv preprint arXiv:1802.01561, 2018.
|
| 248 |
+
John Fearnley. Exponential lower bounds for policy iteration. In International Colloquium on Automata, Languages, and Programming, pp. 551-562. Springer, 2010.
|
| 249 |
+
Vlad Firoiu, Tina Ju, and Josh Tenenbaum. At human speed: Deep reinforcement learning with action delay. arXiv preprint arXiv:1810.07286, 2018.
|
| 250 |
+
Emilia Fridman. Introduction to time-delay systems: Analysis and control. Springer, 2014.
|
| 251 |
+
Thomas Dueholm Hansen and Uri Zwick. Lower bounds for howard's algorithm for finding minimum mean-cost cycles. In International Symposium on Algorithms and Computation, pp. 415-426. Springer, 2010.
|
| 252 |
+
Todd Hester and Peter Stone. Texplore: real-time sample-efficient reinforcement learning for robots. Machine learning, 90(3):385-429, 2013.
|
| 253 |
+
Romain Hollanders, Jean-Charles Delvenne, and Raphaël M Jungers. The complexity of policy iteration is exponential for discounted markov decision processes. In 2012 IEEE 51st IEEE Conference on Decision and Control (CDC), pp. 5997-6002. IEEE, 2012.
|
| 254 |
+
Ronald A Howard. Dynamic programming and Markov processes. John Wiley, 1960.
|
| 255 |
+
|
| 256 |
+
Pooria Joulani, Andras Gyorgy, and Csaba Szepesvári. Online learning under delayed feedback. In International Conference on Machine Learning, pp. 1453-1461, 2013.
|
| 257 |
+
Konstantinos V Katsikopoulos and Sascha E Engelbrecht. Markov decision processes with delays and asynchronous cost collection. IEEE transactions on automatic control, 48(4):568-574, 2003.
|
| 258 |
+
Seung Wook Kim, Yuhao Zhou, Jonah Philion, Antonio Torralba, and Sanja Fidler. Learning to simulate dynamic environments with GameGAN. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 1231-1240, 2020.
|
| 259 |
+
Wei Niu, Xiaolong Ma, Yanzhi Wang, and Bin Ren. 26ms inference time for resnet-50: Towards real-time execution of all dnns on smartphone. arXiv preprint arXiv:1905.00571, 2019.
|
| 260 |
+
Ciara Pike-Burke, Shipra Agrawal, Csaba Szepesvari, and Steffen Grünewälder. Bandits with delayed anonymous feedback. stat, 1050:20, 2017.
|
| 261 |
+
Martin L Puterman. Markov Decision Processes.: Discrete Stochastic Dynamic Programming. John Wiley & Sons, 2014.
|
| 262 |
+
Simon Ramstedt and Chris Pal. Real-time reinforcement learning. In Advances in Neural Information Processing Systems, pp. 3073-3082, 2019.
|
| 263 |
+
Jean-Pierre Richard. Time-delay systems: an overview of some recent advances and open problems. *automatica*, 39(10):1667-1694, 2003.
|
| 264 |
+
Bruno Scherrer et al. Improved and generalized upper bounds on the complexity of policy iteration. Mathematics of Operations Research, 41(3):758-774, 2016.
|
| 265 |
+
Alessandro Toschi, Mustafa Sanic, Jingwen Leng, Quan Chen, Chunlin Wang, and Minyi Guo. Characterizing perception module performance and robustness in production-scale autonomous driving system. In IFIP International Conference on Network and Parallel Computing, pp. 235-247. Springer, 2019.
|
| 266 |
+
Hado Van Hasselt, Arthur Guez, and David Silver. Deep reinforcement learning with double q-learning. arXiv preprint arXiv:1509.06461, 2015.
|
| 267 |
+
Thomas J Walsh, Ali Nouri, Lihong Li, and Michael L Littman. Learning and planning in environments with delayed feedback. Autonomous Agents and Multi-Agent Systems, 18(1):83, 2009.
|
| 268 |
+
Ted Xiao, Eric Jang, Dmitry Kalashnikov, Sergey Levine, Julian Ibarz, Karol Hausman, and Alexander Herzog. Thinking while moving: Deep reinforcement learning with concurrent control. arXiv preprint arXiv:2004.06089, 2020.
|
| 269 |
+
Hengyu Zhao, Yubo Zhang, Pingfan Meng, Hui Shi, Li Erran Li, Tiancheng Lou, and Jishen Zhao. Towards safety-aware computing system design in autonomous vehicles. arXiv preprint arXiv:1905.08453, 2019.
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f568b49772d4b7f93e2dca0e4612fbf14de91746505c17c398b4d06833797556
|
| 3 |
+
size 409032
|
actingindelayedenvironmentswithnonstationarymarkovpolicies/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1c0091e46175749429b84fa0dca4e5b961da0094c7c86e18ba76a753043a0277
|
| 3 |
+
size 470394
|
activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3b7ce12eb01fc0f7bb4e9f95d2ff306bb462d2e9c1a38faa44cb80f0ae440c49
|
| 3 |
+
size 132668
|
activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1957152cc5dd8144b7e14ff567495753cc6b05f1cb57c5d33cf72e82766a47a3
|
| 3 |
+
size 156908
|
activationleveluncertaintyindeepneuralnetworks/1e5b4beb-60f0-452a-bd7e-2a22c1e1c5f7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fecc18b0df8aef6540e0e162cf9d703231d4700f3b0a4aec44168ebfb0cb32ee
|
| 3 |
+
size 1755974
|
activationleveluncertaintyindeepneuralnetworks/full.md
ADDED
|
@@ -0,0 +1,395 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ACTIVATION-LEVEL UNCERTAINTY IN DEEP NEURAL NETWORKS
|
| 2 |
+
|
| 3 |
+
Pablo Morales-Álvarez*
|
| 4 |
+
|
| 5 |
+
Department of Computer Science and AI University of Granada, Spain pablomorales@decsai.ugr.es
|
| 6 |
+
|
| 7 |
+
Daniel Hernández-Lobato
|
| 8 |
+
|
| 9 |
+
Department of Computer Science
|
| 10 |
+
Universidad Autonoma de Madrid, Spain
|
| 11 |
+
|
| 12 |
+
Rafael Molina
|
| 13 |
+
|
| 14 |
+
Department of Computer Science and AI
|
| 15 |
+
University of Granada, Spain
|
| 16 |
+
|
| 17 |
+
Jose Miguel Hernandez-Lobato
|
| 18 |
+
|
| 19 |
+
Department of Engineering
|
| 20 |
+
University of Cambridge, UK
|
| 21 |
+
Alan Turing Institute, London, UK
|
| 22 |
+
|
| 23 |
+
# ABSTRACT
|
| 24 |
+
|
| 25 |
+
Current approaches for uncertainty estimation in deep learning often produce too confident results. Bayesian Neural Networks (BNNs) model uncertainty in the space of weights, which is usually high-dimensional and limits the quality of variational approximations. The more recent functional BNNs (fBNNs) address this only partially because, although the prior is specified in the space of functions, the posterior approximation is still defined in terms of stochastic weights. In this work we propose to move uncertainty from the weights (which are deterministic) to the activation function. Specifically, the activations are modelled with simple 1D Gaussian Processes (GP), for which a triangular kernel inspired by the ReLu non-linearity is explored. Our experiments show that activation-level stochasticity provides more reliable uncertainty estimates than BNN and fBNN, whereas it performs competitively in standard prediction tasks. We also study the connection with deep GPs, both theoretically and empirically. More precisely, we show that activation-level uncertainty requires fewer inducing points and is better suited for deep architectures.
|
| 26 |
+
|
| 27 |
+
# 1 INTRODUCTION
|
| 28 |
+
|
| 29 |
+
Deep Neural Networks (DNNs) have achieved state-of-the-art performance in many different tasks, such as speech recognition (Hinton et al., 2012), natural language processing (Mikolov et al., 2013) or computer vision (Krizhevsky et al., 2012). In spite of their predictive power, DNNs are limited in terms of uncertainty estimation. This has been a classical concern in the field (MacKay, 1992; Hinton & Van Camp, 1993; Barber & Bishop, 1998), which has attracted a lot of attention in the last years (Lakshminarayanan et al., 2017; Guo et al., 2017; Sun et al., 2019; Wenzel et al., 2020). Indeed, this ability to "know what is not known" is essential for critical applications such as medical diagnosis (Esteva et al., 2017; Mobiny et al., 2019) or autonomous driving (Kendall & Gal, 2017; Gal, 2016).
|
| 30 |
+
|
| 31 |
+
Bayesian Neural Networks (BNNs) address this problem through a Bayesian treatment of the network weights<sup>1</sup> (MacKay, 1992; Neal, 1995). This will be referred to as weight-space stochasticity. However, dealing with uncertainty in weight space is challenging, since it contains many symmetries and is highly dimensional (Wenzel et al., 2020; Sun et al., 2019; Snoek et al., 2019; Fort et al., 2019). Here we focus on two specific limitations. First, it has been recently shown that BNNs with well-established inference methods such as Bayes by Backprop (BBP) (Blundell et al., 2015) and MC-Dropout (Gal & Ghahramani, 2016) underestimate the predictive uncertainty for instances located in-between two clusters of training points (Foong et al., 2020; Yao et al., 2019). Second, the weight-space prior does not allow BNNs to guide extrapolation to out-of-distribution (OOD) data (Sun et al., 2019; Nguyen et al., 2015; Ren et al., 2019). Both aspects are illustrated graphically in Figure 3, more details in Section 3.1.
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
Figure 1: Graphical representation of the artificial neurons for closely related methods. The subscript $d$ and the superscript $l$ refer to the $d$ -th unit in the $l$ -th layer, respectively. (a) In standard Neural Networks (NN), both the weights and the activation function are deterministic. (b) In Bayesian NNs, weights are stochastic and the activation is deterministic. (c) In auNN (this work), weights are deterministic and the activation is stochastic. (d) Deep GPs do not have a linear projection through weights, and the output is modelled directly with a GP defined on the $D^{l-1}$ -dimensional input space.
|
| 35 |
+
|
| 36 |
+
As an alternative to standard BNNs, Functional Bayesian Neural Nets (fBNN) specify the prior and perform inference directly in function space (Sun et al., 2019). This provides a mechanism to guide the extrapolation in OOD data, e.g. predictions can be encouraged to revert to the prior in regions of no observed data. However, the posterior stochastic process is still defined by a factorized Gaussian on the network weights (i.e. as in BBP), see (Sun et al., 2019, Sect. 3.1). We will show that this makes fBNN inherit the problem of underestimating the predictive uncertainty for in-between data.
|
| 37 |
+
|
| 38 |
+
In this work, we adopt a different approach by moving stochasticity from the weights to the activation function, see Figure 1. This will be referred to as auNN (activation-level uncertainty for Neural Networks). The activation functions are modelled with (one-dimensional) GP priors, for which a triangular kernel inspired by the ReLu non-linearity (Nair & Hinton, 2010; Glorot et al., 2011) is used. Since non-linearities are typically simple functions (e.g. ReLu, sigmoid, tanh), our GPs are sparsified with few inducing points. The network weights are deterministic parameters which are estimated to maximize the marginal likelihood of the model. The motivation behind auNN is to avoid inference in the complex space of weights. We hypothesise that it could be enough to introduce stochasticity in the activation functions that follow the linear projections to provide sensible uncertainty estimations.
|
| 39 |
+
|
| 40 |
+
We show that auNN obtains well-calibrated estimations for in-between data, and its prior allows to guide the extrapolation to OOD data by reverting to the empirical mean. This will be visualized in a simple 1D example (Figure 3 and Table 1). Moreover, auNN obtains competitive performance in standard benchmarks, is scalable (datasets of up to ten millions training points are used), and can be readily used for classification. The use of GPs for the activations establishes an interesting connection with deep GPs (DGPs) (Damianou & Lawrence, 2013; Salimbeni & Deisenroth, 2017). The main difference is the linear projection before the GP, recall Figure 1(c-d). This allows auNN units to model simpler mappings between layers, which are defined along one direction of the input space, similarly to neural networks. However, DGP units model more complex mappings defined on the whole input space, see also Figure 2a. We will show that auNN units require fewer inducing points and are better suited for deep architectures, achieving superior performance. Also, a thorough discussion on additional related work will be provided in Section 4.
|
| 41 |
+
|
| 42 |
+
In summary, the main contributions of this paper are: (1) a new approach to model uncertainty in DNNs, based on deterministic weights and simple stochastic non-linearities (in principle, not necessarily modelled by GPs); (2) the specific use of non-parametric GPs as a prior, including the triangular kernel inspired by the ReLu; (3) auNN addresses a well-known limitation of BNNs and fBNNs (uncertainty underestimation for in-between data), can guide the extrapolation to OOD data by reverting to the empirical mean, and is competitive in standard prediction tasks; (4) auNN units require fewer inducing points and are better suited for deep architectures than DGP ones, achieving superior performance.
|
| 43 |
+
|
| 44 |
+
# 2 PROBABILISTIC MODEL AND INFERENCE
|
| 45 |
+
|
| 46 |
+
Model specification. We focus on a supervised task (e.g. regression or classification) with training data $^2$ $\{\mathbf{x}_{n,:},\mathbf{y}_{n,:}\}_{n = 1}^N$ . The graphical model in Figure 2b will be useful throughout this section. We
|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
(a)
|
| 50 |
+
|
| 51 |
+

|
| 52 |
+
Figure 2: (a) Type of mappings modelled by DGP and auNN units (colours represent different values). Whereas DGP units describe complex functions defined on the whole $D^{l-1}$ dimensional input space, the linear projection through $\mathbf{w}_d^l$ in auNN yields simpler functions defined on just one direction. This is closer in spirit to NNs, requires fewer inducing points, and is better suited for deep architectures. The inducing points are shown in black (for auNN, these correspond to (hyper)planes in the input space before the projection). (b) Probabilistic graphical model for an auNN layer. Yellow variables are to be estimated (light ones through point estimates and the dark one through a posterior distribution). The box highlights the auxiliary variables (inducing points and their values). (c) Graphical representation of the UCI gap splits. In red, a segment that crosses the gap joining two training points from different components, which will be used in the experiments.
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
(b)
|
| 56 |
+
|
| 57 |
+

|
| 58 |
+
(c)
|
| 59 |
+
|
| 60 |
+
assume a model of $L$ layers, each one with $D^l$ units as in Figure 1c. Each activation is modelled with a (1D) GP prior, i.e. $f_{d}^{l}(a_{d}^{l})\sim \mathcal{GP}(\mu_{d}^{l},k_{d}^{l})$ , with $\mu_d^l:\mathbb{R}\to \mathbb{R}$ and $k_d^l:\mathbb{R}\times \mathbb{R}\rightarrow \mathbb{R}$ . The GP hyperparameters $\pmb{\theta}_{d}^{l}$ will be omitted for clarity (for the kernels used here, $\pmb{\theta}_{d}^{l}$ includes the amplitude and the lengthscale). Assuming independence between units, each layer depends on the previous one as:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\mathrm {p} \left(\mathbf {F} ^ {l} \mid \mathbf {F} ^ {l - 1}, \mathbf {W} ^ {l}\right) = \mathrm {p} \left(\mathbf {F} ^ {l} \mid \mathbf {A} ^ {l}\right) = \prod_ {d = 1} ^ {D ^ {l}} \mathrm {p} \left(\mathbf {f} _ {d} ^ {l} \mid \mathbf {a} _ {d} ^ {l}\right), \tag {1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $\mathbf{F}^l$ is the $N\times D^{l}$ matrix of outputs of the $l$ -th layer for $N$ inputs, $\mathbf{W}^l$ is the $D^{l - 1}\times D^{l}$ matrix of weights in that layer, and $\mathbf{A}^l$ is the $N\times D^{l}$ matrix of pre-activations, i.e. $\mathbf{A}^l = \mathbf{F}^{l - 1}\cdot \mathbf{W}^l$ . As usual, the columns and rows of $\mathbf{F}^l$ are denoted as $\mathbf{f}_d^l$ and $\mathbf{f}_{n,:}^l$ , respectively (and analogously for the other matrices). Since the activation is defined by a GP, we have $p(\mathbf{f}_d^l|\mathbf{a}_d^l) = \mathcal{N}(\mathbf{f}_d^l|\boldsymbol {\mu}_d^l,\mathbf{K}_d^l)$ , with $\boldsymbol{\mu}_d^l$ (resp. $\mathbf{K}_d^l$ ) the result of evaluating the mean function $\mu_d^l$ (resp. the kernel $k_{d}^{l}$ ) on $\mathbf{a}_d^l$ (that is, $\boldsymbol{\mu}_d^l$ is an $N$ -dimensional vector and $\mathbf{K}_d^l$ is a $N\times N$ matrix). To fully specify the model, the output $\mathbf{Y}$ is defined from the last layer with a distribution that factorizes across data points, i.e. $\mathrm{p}(\mathbf{Y}|\mathbf{F}^L) = \prod_{n = 1}^{N}\mathrm{p}(\mathbf{y}_{n,:}|\mathbf{f}_{n,:}^L)$ . This formulation resembles that of DGPs (Damianou & Lawrence, 2013; Salimbeni & Deisenroth, 2017). The main difference is that we model $\mathbf{F}^l|\mathbf{F}^{l - 1}$ through $D^{l}$ 1D GPs evaluated on the pre-activations $\mathbf{A}^l$ (i.e. the projections of $\mathbf{F}^{l - 1}$ through $\mathbf{W}^l$ ), whereas DGPs use $D^{l}$ GPs of dimension $D^{l - 1}$ evaluated directly on $\mathbf{F}^{l - 1}$ , recall Figure 1(c-d).
|
| 67 |
+
|
| 68 |
+
Variational Inference. Inference in the proposed model is intractable. To address this, we follow standard sparse variational GP approaches (Titsias, 2009; Hensman et al., 2013; 2015), similarly to the Doubly Stochastic Variational Inference (DSVI) for DGPs (Salimbeni & Deisenroth, 2017). Specifically, in each unit of each layer we introduce $M^l$ inducing values $\mathbf{u}_d^l$ , which are the result of evaluating the GP on the one-dimensional inducing points $\mathbf{z}_d^l$ . We naturally write $\mathbf{U}^l$ and $\mathbf{Z}^l$ for the corresponding $M^l \times D^l$ matrices associated to the $l$ -th layer, respectively. Following eq. (1), the augmented model for one layer is
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\mathrm {p} (\mathbf {F} ^ {l}, \mathbf {U} ^ {l} | \mathbf {F} ^ {l - 1}, \mathbf {W} ^ {l}, \mathbf {Z} ^ {l}) = \mathrm {p} (\mathbf {F} ^ {l} | \mathbf {U} ^ {l}, \mathbf {A} ^ {l}, \mathbf {Z} ^ {l}) \mathrm {p} (\mathbf {U} ^ {l} | \mathbf {Z} ^ {l}) = \prod_ {d = 1} ^ {D ^ {l}} \mathrm {p} (\mathbf {f} _ {d} ^ {l} | \mathbf {u} _ {d} ^ {l}, \mathbf {a} _ {d} ^ {l}, \mathbf {z} _ {d} ^ {l}) \mathrm {p} (\mathbf {u} _ {d} ^ {l} | \mathbf {z} _ {d} ^ {l}). \quad (2)
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
Variational inference (VI) involves the approximation of the true posterior $\mathrm{p}(\{\mathbf{F}^l, \mathbf{U}^l\}_l|\mathbf{Y})$ . Following (Hensman et al., 2013; Salimbeni & Deisenroth, 2017), we propose a posterior given by $\mathrm{p}(\mathbf{F}|\mathbf{U})$ and a parametric Gaussian on $\mathbf{U}$ :
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\mathrm {q} \left(\left\{\mathbf {F} ^ {l}, \mathbf {U} ^ {l} \right\} _ {l}\right) = \prod_ {l = 1} ^ {L} \mathrm {p} \left(\mathbf {F} ^ {l} \mid \mathbf {U} ^ {l}, \mathbf {A} ^ {l}, \mathbf {Z} ^ {l}\right) \mathrm {q} \left(\mathbf {U} ^ {l}\right) = \prod_ {l = 1} ^ {L} \prod_ {d = 1} ^ {D ^ {l}} \mathrm {p} \left(\mathbf {f} _ {d} ^ {l} \mid \mathbf {u} _ {d} ^ {l}, \mathbf {a} _ {d} ^ {l}, \mathbf {z} _ {d} ^ {l}\right) \mathrm {q} \left(\mathbf {u} _ {d} ^ {l}\right), \tag {3}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
where $\mathrm{q}(\mathbf{u}_d^l) = \mathcal{N}(\mathbf{u}_d^l |\mathbf{m}_d^l,\mathbf{S}_d^l)$ , with $\mathbf{m}_d^l\in \mathbb{R}^{M^l}$ and $\mathbf{S}_d^l\in \mathbb{R}^{M^l\times M^l}$ variational parameters to be estimated. Minimizing the KL divergence between $\mathrm{q}(\{\mathbf{F}^l,\mathbf{U}^l\} _l)$ and the true posterior is equivalent to maximizing the following evidence lower bound (ELBO):
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\log \mathrm {p} (\mathbf {Y} | \{\mathbf {W} ^ {l}, \mathbf {Z} ^ {l} \} _ {l}) \geq \operatorname {E L B O} = \sum_ {n = 1} ^ {N} \mathbb {E} _ {\mathrm {q} \left(\mathbf {f} _ {n,:} ^ {L}\right)} \left[ \log \mathrm {p} \left(\mathbf {y} _ {n,:} \mid \mathbf {f} _ {n,:} ^ {L}\right) \right] - \sum_ {l = 1} ^ {L} \sum_ {d = 1} ^ {D ^ {l}} \operatorname {K L} \left(\mathrm {q} \left(\mathbf {u} _ {d} ^ {l}\right) \mid \mid \mathrm {p} \left(\mathbf {u} _ {d} ^ {l}\right)\right). \tag {4}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
In the ELBO, the KL term can be computed in closed-form, as both $\mathrm{q}(\mathbf{u}_d^l)$ and $\mathrm{p}(\mathbf{u}_d^l)$ are Gaussians. The log likelihood term can be approximated by sampling from the marginal posterior $\mathrm{q}(\mathbf{f}_{n,:}^L)$ , which can be done efficiently through univariate Gaussians as in (Salimbeni & Deisenroth, 2017). Specifically, $\mathbf{U}^l$ can be analytically marginalized in eq. (3), which yields $\mathrm{q}(\{\mathbf{F}^l\}_l) = \prod_l \mathrm{q}(\mathbf{F}^l | \mathbf{F}^{l-1}, \mathbf{W}^l) = \prod_{l,d} \mathcal{N}(\mathbf{f}_d^l | \tilde{\boldsymbol{\mu}}_d^l, \tilde{\boldsymbol{\Sigma}}_d^l)$ , with:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\left[ \tilde {\boldsymbol {\mu}} _ {d} ^ {l} \right] _ {i} = \mu_ {d} ^ {l} \left(a _ {i d} ^ {l}\right) + \boldsymbol {\alpha} _ {d} ^ {l} \left(a _ {i d} ^ {l}\right) ^ {\intercal} \left(\mathbf {m} _ {d} ^ {l} - \mu_ {d} ^ {l} \left(\mathbf {z} _ {d} ^ {l}\right)\right), \tag {5}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
[ \tilde {\boldsymbol {\Sigma}} _ {d} ^ {l} ] _ {i j} = k _ {d} ^ {l} \left(a _ {i d} ^ {l}, a _ {j d} ^ {l}\right) - \boldsymbol {\alpha} _ {d} ^ {l} \left(a _ {i d} ^ {l}\right) ^ {\intercal} \left(k _ {d} ^ {l} \left(\mathbf {z} _ {d} ^ {l}\right) - \mathbf {S} _ {d} ^ {l}\right) \boldsymbol {\alpha} _ {d} ^ {l} \left(a _ {j d} ^ {l}\right), \tag {6}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $\pmb{\alpha}_{d}^{l}(x) = k_{d}^{l}(x,\mathbf{z}_{d}^{l})[k_{d}^{l}(\mathbf{z}_{d}^{l})]^{-1}$ and $\mathbf{a}_{n,:}^{l} = (\mathbf{W}^{l})^{\intercal}\mathbf{f}_{n,:}^{l - 1}$ . Importantly, the marginal posterior $\mathrm{q}(\mathbf{f}_{n,:}^{l})$ is a Gaussian that depends only on $\mathbf{a}_{n,:}^{l}$ , which in turn only depends on $\mathrm{q}(\mathbf{f}_{n,:}^{l - 1})$ . Therefore, sampling from $\mathbf{f}_{n,:}^{l}$ is straightforward using the reparametrization trick (Kingma & Welling, 2013):
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
f _ {n d} ^ {l} = \left[ \tilde {\boldsymbol {\mu}} _ {d} ^ {l} \right] _ {n} + \varepsilon \cdot \left[ \tilde {\boldsymbol {\Sigma}} _ {d} ^ {l} \right] _ {n n} ^ {1 / 2}, \quad \text {with } \varepsilon \sim \mathcal {N} (0, 1), \quad \text {and } \mathbf {f} _ {n,:} ^ {0} = \mathbf {x} _ {n,:}. \tag {7}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
Training consists in maximizing the ELBO, eq. (4), w.r.t. variational parameters $\{\mathbf{m}_d^l,\mathbf{S}_d^l\}$ , inducing points $\{\mathbf{z}_d^l\}$ , and model parameters (i.e. weights $\{\mathbf{w}_d^l\}$ and kernel parameters $\{\pmb {\theta}_d^l\}$ ). This can be done in batches, allowing for scalability to very large datasets. The complexity to evaluate the ELBO is $\mathcal{O}(NM^{2}(D^{1} + \dots +D^{L}))$ , the same as DGPs with DSVI (Salimbeni & Deisenroth, 2017).
|
| 103 |
+
|
| 104 |
+
Predictions. Given a new $\mathbf{x}_{*,:}$ , we want to compute $^4$ $\mathrm{p}(\mathbf{f}_{*,:}^L|\mathbf{X},\mathbf{Y})\approx \mathbb{E}_{\mathrm{q}(\{\mathbf{U}^l\})}\left[\mathrm{p}(\mathbf{f}_{*,:}^L|\{\mathbf{U}^l\})\right]$ . As in (Salimbeni & Deisenroth, 2017), this can be approximated by sampling $S$ values up to the $(L - 1)$ -th layer with the same eq. (7), but starting with $\mathbf{x}_{*,:}$ . Then, $\mathrm{p}(\mathbf{f}_{*,:}^L|\mathbf{X},\mathbf{Y})$ is given by the mixture of the $S$ Gaussian distributions obtained from eqs. (5)-(6).
|
| 105 |
+
|
| 106 |
+
Triangular kernel. One of the most popular kernels in GPs is the RBF (Williams & Rasmussen, 2006), which produces very smooth functions. However, the ReLu non-linearity led to a general boost in performance in DNNs (Nair & Hinton, 2010; Glorot et al., 2011), and we aim to model similar activations. Therefore, we introduce the use of the triangular (TRI) kernel. Just like RBF, TRI is an isotropic kernel, i.e. it depends on the distance between the inputs, $k(x,y) = \gamma \cdot g(|x - y| / \ell)$ , with $\gamma$ and $\ell$ the amplitude and lengthscale. For RBF, $g(t) = e^{-t^2 /2}$ . For TRI, $g(t) = \max (1 - t,0)$ . This is a valid kernel (Williams & Rasmussen, 2006, Section 4.2.1). Similarly to the ReLu, the functions modelled by TRI are piecewise linear, see Figure 6a in the main text and Figure 8 in Appendix C.
|
| 107 |
+
|
| 108 |
+
Comparison with DGP. The difference between auNN and DGP units is graphically illustrated in Figure 2a. Whereas DGP mappings from one layer to the next are complex functions defined on $D^{l-1}$ dimensions ( $D^{l-1} = 2$ in the figure), auNN mappings are defined just along one direction via the weight projection. This is closer in spirit to NNs, whose mappings are also simpler and better suited for feature extraction and learning more abstract concepts. Moreover, since the GP is defined on a 1D space, auNN requires fewer inducing points than DGP (which, intuitively, can be interpreted as inducing (hyper)planes in the $D^{l-1}$ -dimensional space before the projection).
|
| 109 |
+
|
| 110 |
+
# 3 EXPERIMENTS
|
| 111 |
+
|
| 112 |
+
In this section, auNN is compared to BNN, fBNN (Sun et al., 2019) and DSVI DGP (Salimbeni & Deisenroth, 2017). BNNs are trained with BBP (Blundell et al., 2015), since auNN also leverages a simple VI-based inference approach. In each section we will highlight the most relevant experimental aspects, and all the details can be found in Appendix B. In the sequel, NLL stands for Negative Log Likelihood. Anonymized code for auNN is provided in the supplementary material, along with a script to run it for the 1D illustrative example of Section 3.1.
|
| 113 |
+
|
| 114 |
+
# 3.1 AN ILLUSTRATIVE EXAMPLE
|
| 115 |
+
|
| 116 |
+
Here we illustrate the two aspects that were highlighted in the introduction: the underestimation of predictive uncertainty for instances located in-between two clusters of training points and the
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
Figure 3: Predictive distribution (mean and one standard deviation) after training on a 1D dataset with two clusters of points. This simple example illustrates the main limitations of NN, BNN and fBNN, which are overcome by the novel auNN. See Table 1 for a summary and the text for details.
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
|
| 125 |
+
Table 1: Visual overview of conclusions from the 1D experiment in Figure 3. This shows that NN, BNN, fBNN and auNN increasingly expand their capabilities.
|
| 126 |
+
|
| 127 |
+
<table><tr><td></td><td>Epistemic uncertainty</td><td>Reverts to the mean</td><td>In-between uncertainty</td></tr><tr><td>NN</td><td>X</td><td>X</td><td>X</td></tr><tr><td>BNN</td><td>✓</td><td>X</td><td>X</td></tr><tr><td>fBNN</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>auNN</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 128 |
+
|
| 129 |
+
extrapolation to OOD data. Figure 3 shows the predictive distribution of NN, BNN, fBNN and auNN (with RBF and TRI kernels) after training on a simple 1D dataset with two clusters of points. All the methods have one hidden layer with 25 units, and 5 inducing points are used for auNN.
|
| 130 |
+
|
| 131 |
+
In Figure 3, the deterministic nature of NNs prevents them from providing epistemic uncertainty (i.e. the one originating from the model (Kendall & Gal, 2017)). Moreover, there is no prior to guide the extrapolation to OOD data. BNNs provide epistemic uncertainty. However, the prior in the complex space of weights does not allow for guiding the extrapolation to OOD data (e.g. by reverting to the empirical mean). Moreover, note that BNNs underestimate the predictive uncertainty in the region between the two clusters, where there is no observed data (this region is usually called the gap). More specifically, as shown in (Foong et al., 2020), the predictive uncertainty for data points in the gap is limited by that on the extremes. By specifying the prior in function space, fBNN can induce properties in the output, such as reverting to the empirical mean for OOD data through a zero-mean GP prior. However, the underestimation of in-between uncertainty persists, since the posterior stochastic process for fBNN is based on a weight-space factorized Gaussian (as BNN with BBP), see (Sun et al., 2019, Section 3.1) for details. Finally, auNN (either with RBF or TRI kernel) addresses both aspects through the novel activation-level modelling of uncertainty, which utilizes a zero-mean GP prior for the activations. Table 1 summarizes the main characteristics of each method. Next, a more comprehensive experiment with deeper architectures and more complex multidimensional datasets is provided.
|
| 132 |
+
|
| 133 |
+
# 3.2 UCI REGRESSION DATASETS WITH GAP SPLITS
|
| 134 |
+
|
| 135 |
+
Standard splits are not appropriate to evaluate the quality of uncertainty estimates for in-between data, since both train and test sets may cover the space equally. This motivated the introduction of gap splits (Foong et al., 2019). Namely, a set with $D$ dimensions admits $D$ such train-test partitions by considering each dimension, sorting the points according to its value, and selecting the middle $1/3$ for test (and the outer $2/3$ for training), see Figure 2c. With these partitions, overconfident predictions for data points in the gap manifest as very high values of test negative log likelihood.
|
| 136 |
+
|
| 137 |
+
Using the gap splits, it was recently shown that BNNs yield overconfident predictions for in-between data (Foong et al., 2019). The authors highlight the case of Energy and Naval datasets, where BNNs fail catastrophically. Figure 4a reproduces these results for BNNs and checks that fBNNs also obtain overconfident predictions, as theoretically expected. However, notice that activation-level stochasticity performs better, especially through the triangular kernel, which dramatically improves the results (see the plot scale). Figure 4b confirms that the difference is due to the underestimation of uncertainty, since the predictive performance in terms of RMSE is on a similar scale for all the methods. In all cases, $D = 50$ hidden units are used, and auNN uses $M = 10$ inducing points.
|
| 138 |
+
|
| 139 |
+
To further understand the intuition behind the different results, Figure 5 shows the predictive distribution over a segment that crosses the gap, recall Figure 2c. We observe that activation-level approaches obtain more sensible (less confident) uncertainties in the gap, where there is no observed data. For instance, BNN and fBNN predictions in Naval are unjustifiably overconfident, since the output in that dataset ranges from 0.95 to 1. Also, to illustrate the internal mechanism of auNN, Figure 6a shows one example of the activations learned when using each kernel. Although it is just one example, it allows for visualising the different nature: smoother for RBF and piecewise linear for TRI. All the activations for a particular network and for both kernels are shown in Appendix C (Figure 8).
|
| 140 |
+
|
| 141 |
+

|
| 142 |
+
(a)
|
| 143 |
+
|
| 144 |
+

|
| 145 |
+
(b)
|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
Figure 4: Test NLL (a) and RMSE (b) for the gap splits in Energy and Naval datasets (mean and one standard error, the lower the better). Activation-level uncertainty, especially through the triangular kernel, avoids the dramatic failure of BNN and fBNN in terms of NLL (see the scale). The similar values in RMSE reveal that this failure actually comes from an extremely overconfident estimation by BNN and fBNN, see also Figure 5.
|
| 149 |
+
Figure 5: Predictive distribution (mean and one standard deviation) over a segment that crosses the gap, joining two training points from different connected components. auNN avoids overconfident predictions by allocating more uncertainty in the gap, where there is no observed data.
|
| 150 |
+
|
| 151 |
+
In addition to the paradigmatic cases of Energy and Naval illustrated here, four more datasets are included in Appendix C. Figure 7 there is analogous to Figure 4 here, and Tables 4 and 5 there show the full numeric results and ranks. We observe that auNN, especially through the triangular kernel, obtains the best results and does not fail catastrophically in any dataset (unlike BNN and fBNN, which do in Energy and Naval). Finally, the performance on the gap splits is complemented by that on standard splits, see Tables 6 and 7 in Appendix C. This shows that, in addition to the enhanced uncertainty estimation, auNN is a competitive alternative in general practice.
|
| 152 |
+
|
| 153 |
+
# 3.3 COMPARISON WITH DGPS
|
| 154 |
+
|
| 155 |
+
As explained in Section 2, the choice of a GP prior for activation stochasticity establishes a strong connection with DGPs. The main difference is that auNN performs a linear projection from $D^{l-1}$ to $D^l$ dimensions before applying $D^l$ 1D GPs, whereas DGPs define $D^l$ GPs directly on the $D^{l-1}$ dimensional space. This means that auNN units are simpler than those of DGP, recall Figure 2a. Here we show two practical implications of this.
|
| 156 |
+
|
| 157 |
+
First, it is reasonable to hypothesise that DGP units may require a higher number of inducing points $M$ than auNN, since they need to cover a multi-dimensional input space. By contrast, auNN may require a higher number of hidden units $D$ , since these are simpler. Importantly, the computational cost is not symmetric in $M$ and $D$ , but significantly cheaper on $D$ , recall Section 2. Figure 6b shows the performance of auNN and DGP for different values of $M$ and $D$ on the UCI Kin8 set (with one hidden layer; depth will be analyzed next). As expected, note the different influence by $M$ and $D$ : whereas auNN improves "by rows" (i.e. as $D$ grows), DGP does it "by columns" (i.e. as $M$ grows) $^5$ . The next section (Section 3.4) will show that this makes auNN faster than DGP in practice. An analogous figure for RMSE and full numeric results are in Appendix C (Figure 9 and Tables 9-10).
|
| 158 |
+
|
| 159 |
+
Second, auNN's simpler units might be better suited for deeper architectures. Figure 6c shows the performance on the UCI Power dataset when depth is additionally considered. It can be observed that auNN is able to take greater advantage of depth, which translates into better overall performance.
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Figure 6: (a) One example of activation function (mean and standard deviation) learned by auNN with each kernel. RBF's one is smoother, whereas TRI's is piecewise linear, inspired by ReLu. Black dots represent (the mean of) the inducing point values. Green dots are the locations of input data when propagated to the corresponding unit. (b) Test NLL of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units). The lower the better. The results are the average over five independent runs with different splits. Whereas DGP improves "by columns" (i.e. with $M$ ), auNN does it "by rows" (i.e. with $D$ ). This is as hypothesized, and is convenient from a scalability viewpoint. (c) Test NLL with increasing depth ( $L = 2, 3, 4$ ). This supports that auNN might benefit more than DGP from deeper networks. Moreover, the aforementioned different influence of $M$ and $D$ on DGP and auNN is confirmed here.
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+
Moreover, the aforementioned different influence of $D$ and $M$ on DGP and on auNN is also confirmed here. The results on RMSE are similar, see Figure 10 and Tables 11-12 in Appendix C.
|
| 167 |
+
|
| 168 |
+
Finally, it may be argued that auNN closely resembles a DGP with additive kernel (Duvenaud et al., 2011; Durrande et al., 2011) (DGP-add hereafter). Recall that an additive kernel models functions that are decomposed as $f(\mathbf{x}) = f_1(x_1) + \dots + f_D(x_D)$ . Therefore, the model for $a^{l+1}|a^l$ in auNN is very similar to that of $f^{l+1}|f^l$ in DGP-add, see Figure 11 in Appendix C. Specifically, in both cases, the input $(a^l$ in auNN, $f^l$ in DGP-add) goes through 1D GPs and then these are aggregated (linear combination through W in auNN, summation in DGP-add) to yield the output $(a^{l+1}$ in auNN, $f^{l+1}$ in DGP-add). However, there exists a key difference. In auNN, all the nodes in the $(l+1)$ -th layer (i.e. $a_i^{l+1}$ ) aggregate a shared set of distinct functions (namely, $f_i^l$ ), each node using its own weights to aggregate them. While in DGP-add, there is not such shared set of functions, and each node in the $(l+1)$ -th layer (i.e. $f_i^{l+1}$ ) aggregates a different set of GP realizations (i.e. the unlabelled blue nodes in Figure 11c). This subtle theoretical difference has empirical implications, since many more functions need to be learned for DGP-add. Indeed, Figures 12 and 13 in Appendix C compare the performance of DGP-add and auNN-RBF (the experimental setting is analogous to that of Figure 6c)<sup>6</sup>. We observe that the results obtained by DGP-add are worse than those by auNN-RBF, probably due to the larger number of functions that need to be learned in DGP-add.
|
| 169 |
+
|
| 170 |
+
# 3.4 CLASSIFICATION, SCALABILITY, AND ADDITIONAL METRICS
|
| 171 |
+
|
| 172 |
+
So far, we have experimented with small to medium regression datasets, and uncertainty estimation has been measured through the (negative) log likelihood and the visual inspection of the predictive distribution (Figures 3 and 5). Here we focus on two large scale classification datasets (up to $10^{7}$ instances), and additional metrics that account for uncertainty calibration are reported. We use the well-known particle physics binary classification sets HIGGS ( $N = 11\mathrm{M}$ , $D = 28$ ) and SUSY ( $N = 5\mathrm{M}$ , $D = 18$ ) (Baldi et al., 2014). We consider DGP as a baseline, as it obtained state-of-the-art results for these datasets (Salimbeni & Deisenroth, 2017). For all the methods, we consider a Robust-Max classification likelihood (Hernández-Lobato et al., 2011).
|
| 173 |
+
|
| 174 |
+
The metrics to be used are the Brier score (Gneiting & Raftery, 2007) and the Expected Calibration Error (ECE) (Guo et al., 2017). The former is a proper score function that measures the accuracy of probabilistic predictions for categorical variables. In practice, it is computed as the mean squared
|
| 175 |
+
|
| 176 |
+
Table 2: Brier score and expected calibration error (ECE) for auNN and DGP in the large scale classification datasets HIGGS and SUSY (the lower the better in both metrics). The standard error (on three splits) is close to zero in all cases, see Table 13 in Appendix C.
|
| 177 |
+
|
| 178 |
+
<table><tr><td rowspan="2" colspan="2"></td><td rowspan="2">N</td><td rowspan="2">D</td><td colspan="6">auNN</td><td colspan="3">DGP</td></tr><tr><td>RBF-2</td><td>RBF-3</td><td>RBF-4</td><td>TRI-2</td><td>TRI-3</td><td>TRI-4</td><td>DGP-2</td><td>DGP-3</td><td>DGP-4</td></tr><tr><td rowspan="2">Brier</td><td>HIGGS</td><td>11M</td><td>28</td><td>0.3363</td><td>0.3159</td><td>0.3098</td><td>0.3369</td><td>0.3172</td><td>0.3118</td><td>0.4527</td><td>0.4399</td><td>0.4378</td></tr><tr><td>SUSY</td><td>5M</td><td>18</td><td>0.2746</td><td>0.2739</td><td>0.2737</td><td>0.2749</td><td>0.2742</td><td>0.2738</td><td>0.3815</td><td>0.3816</td><td>0.3804</td></tr><tr><td rowspan="2">ECE</td><td>HIGGS</td><td>11M</td><td>28</td><td>0.2196</td><td>0.2383</td><td>0.2427</td><td>0.2198</td><td>0.2390</td><td>0.2397</td><td>0.4352</td><td>0.4303</td><td>0.4251</td></tr><tr><td>SUSY</td><td>5M</td><td>18</td><td>0.3453</td><td>0.3496</td><td>0.3504</td><td>0.3462</td><td>0.3485</td><td>0.3465</td><td>0.5304</td><td>0.5291</td><td>0.5273</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 3: Average training time per batch over 50 independent runs (in seconds). The standard error is low in all cases, see Table 14 in Appendix C.
|
| 181 |
+
|
| 182 |
+
<table><tr><td rowspan="2"></td><td colspan="6">auNN</td><td colspan="3">DGP</td></tr><tr><td>RBF-2</td><td>RBF-3</td><td>RBF-4</td><td>TRI-2</td><td>TRI-3</td><td>TRI-4</td><td>DGP-2</td><td>DGP-3</td><td>DGP-4</td></tr><tr><td>HIGGS</td><td>0.0962</td><td>0.1607</td><td>0.2259</td><td>0.0922</td><td>0.1647</td><td>0.2308</td><td>0.1918</td><td>0.3102</td><td>0.3930</td></tr><tr><td>SUSY</td><td>0.0926</td><td>0.1564</td><td>0.2245</td><td>0.0923</td><td>0.1563</td><td>0.2265</td><td>0.1430</td><td>0.2129</td><td>0.2771</td></tr></table>
|
| 183 |
+
|
| 184 |
+
difference between a one-dimensional vector with the probability for each class label and the one-hot encoding of the actual class. The latter measures miscalibration as the difference in expectation between confidence and accuracy. This is done by partitioning the predictions in $M$ equally spaced bins and taking a weighted average of the bins' accuracy/confidence difference, see (Guo et al., 2017, Eq.(3)) for details.
|
| 185 |
+
|
| 186 |
+
Table 2 shows the Brier score and ECE for auNN and DGP for different values of $L$ (depth). We observe that auNN outperforms DGP in both metrics, achieving superior uncertainty estimation. Both TRI and RBF kernels obtain similar results for auNN. Notice that the Brier score generally improves with the network depth, whereas the performance in ECE decreases with depth. Interestingly, this behavior was also observed for standard NNs (Guo et al., 2017, Figure 2a).
|
| 187 |
+
|
| 188 |
+
Finally, as was theoretically justified in Section 2, auNN can scale up to very large datasets (HIGGS has more than $10^{7}$ training instances). Regarding the practical computational cost, Table 3 shows the average training time per batch for both auNN and DGP in the previous datasets. Although the theoretical complexity is analogous for both methods (recall Section 2), the experiments in Figures 6b-c showed that DGP requires larger values of $M$ , whereas auNN needs larger $D^{7}$ . Since the computational cost is not symmetric on $M$ and $D$ , but significantly cheaper in the latter (recall Section 2), auNN is faster than DGP in practice.
|
| 189 |
+
|
| 190 |
+
# 4 RELATED WORK
|
| 191 |
+
|
| 192 |
+
Activation-level uncertainty is introduced here as an alternative to weight-space stochasticity. The expressiveness of the latter has been analyzed in the recent work (Wenzel et al., 2020), where the authors advocate a modified BNN objective. Alternatively, different prior specifications are studied in (Hafner et al., 2020; Pearce et al., 2019; Flam-Shepherd et al., 2017), in addition to the fBNN discussed here (Sun et al., 2019). However, none of these works consider stochasticity on the activations.
|
| 193 |
+
|
| 194 |
+
Since we present a straightforward use of VI for auNN, in this work we have compared empirically with the well-known VI-based BBP for BNNs. Yet, we expect auNN to benefit from independent inference refinements like those proposed over the last years for BNNs. For instance, natural-gradient VI allows for leveraging techniques such as BatchNorm or data augmentation (Osawa et al., 2019), and the information contained in the SGD trajectory can be exploited as well (Maddox et al., 2019). Also, getting rid of the gradient variance through deterministic approximate moments has provided enhanced results in BNNs (Wu et al., 2019).
|
| 195 |
+
|
| 196 |
+
A key aspect of auNN is the modelling of the activation function. This element of neural nets has been analyzed before. For instance, self-normalizing neural nets (Klambauer et al., 2017) induce the normalization that is explicitly performed in related approaches such as BatchNorm (Ioffe & Szegedy, 2015) and weight and layer normalization (Salimans & Kingma, 2016; Ba et al., 2016). Learnable deterministic activations have been explored too, e.g. (He et al., 2015; Agostinelli et al., 2014). However, as opposed to auNN, in all these cases the activations are deterministic.
|
| 197 |
+
|
| 198 |
+
Probabilistic neural networks such as Natural-Parameter Networks (NPN) (Wang et al., 2016) propagate probability distributions through layers of transformations. Therefore, the values of the activations are also described by probability distributions (specifically, the exponential family is used in NPN). Fast dropout training (Wang & Manning, 2013) and certain variants of NPNs can be also viewed in this way (Shekhotsov & Flach, 2018; Postels et al., 2019). However, in auNN the activations are modelled themselves as stochastic learnable components that follow a GP prior. Along with the deterministic weights, this provides a conceptually different approach to model uncertainty.
|
| 199 |
+
|
| 200 |
+
A very preliminary study on GP-based activation functions is proposed in (Urban & van der Smagt, 2018). However, the method is not empirically evaluated, no connection with deep GPs is provided, and the inference approach is limited. Namely, the output of each unit is approximated with a Gaussian whose mean and covariance are computed in closed-form, as was done in (Bui et al., 2016) for DGPs. However, this is only tractable for the RBF kernel (in particular, it cannot leverage the more convenient TRI kernel studied here), and the Gaussian approximation typically yields worse results than Monte Carlo approximations to the ELBO as used here (indeed, DSVI (Salimbeni & Deisenroth, 2017) substantially improved the results for DGPs compared to (Bui et al., 2016)).
|
| 201 |
+
|
| 202 |
+
# 5 CONCLUSIONS AND FUTURE WORK
|
| 203 |
+
|
| 204 |
+
We proposed a novel approach for uncertainty estimation in neural network architectures. Whereas previous methods are mostly based on a Bayesian treatment of the weights, here we move the stochasticity to the activation functions, which are modelled with a simple 1D GP and a triangular kernel inspired by the ReLu. Our experiments show that the proposed method obtains better calibrated uncertainty estimates and is competitive in standard prediction tasks. Moreover, the connection with deep GPs is analyzed. Namely, our approach requires fewer inducing points and is better suited for deep architectures, achieving superior performance.
|
| 205 |
+
|
| 206 |
+
We hope this work raises interest on alternative approaches to model uncertainty in neural networks. One of the main directions of future research is to deeply understand the properties induced by each one of the kernels considered here (i.e. the triangular one and RBF). In particular, it would be interesting to automatically learn the optimal kernel for each unit in a probabilistic way. Also, the use of a GP prior for the activation function may hamper the scalability of auNN to wider and/or deeper networks. In these cases, the GP-based activation model could be substituted by a simpler Bayesian parametric one. This would allow for a cheaper modelling of uncertainty within the activations. Finally, since only the activation function is modified, important deep learning elements such as convolutional layers can be still incorporated.
|
| 207 |
+
|
| 208 |
+
# ACKNOWLEDGEMENTS
|
| 209 |
+
|
| 210 |
+
This work was supported by the "Agencia Estatal de Investigación" of the Spanish "Ministerio de Ciencia e Innovación" under contract PID2019-105142RB-C22/AEI/10.13039/501100011033, and the Spanish "Ministerio de Economía, Industria y Competitividad" under contract DPI2016-77869-C2-2-R. DHL acknowledges support from the Spanish "Ministerio de Ciencia e Innovación" (projects TIN2016-76406-P and PID2019-106827GB-I00/AEI/10.13039/501100011033). PMA was funded by La Caixa Banking Foundation (ID 100010434, Barcelona, Spain) through La Caixa Fellowship for Doctoral Studies LCF/BQ/ES17/11600011, and the University of Granada through the program "Proyectos de Investigación Precompetitivos para Jóvenes Investigadores del Plan Propio 2019" (ref. PPJIB2019-03).
|
| 211 |
+
|
| 212 |
+
# REFERENCES
|
| 213 |
+
|
| 214 |
+
F. Agostinelli, M. Hoffman, P. Sadowski, and P. Baldi. Learning activation functions to improve deep neural networks. arXiv preprint arXiv:1412.6830, 2014.
|
| 215 |
+
|
| 216 |
+
J.L. Ba, J.R. Kiros, and G.E. Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016.
|
| 217 |
+
P. Baldi, P. Sadowski, and D. Whiteson. Searching for exotic particles in high-energy physics with deep learning. Nature communications, 5:4308, 2014.
|
| 218 |
+
D. Barber and C.M. Bishop. Ensemble learning for multi-layer networks. In Advances in neural information processing systems, pp. 395-401, 1998.
|
| 219 |
+
C. Blundell, J. Cornebise, K. Kavukcuoglu, and D. Wierstra. Weight uncertainty in neural network. In International conference on machine learning, pp. 1613-1622, 2015.
|
| 220 |
+
T. Bui, D. Hernández-Lobato, J.M. Hernández-Lobato, Y. Li, and R.E. Turner. Deep Gaussian processes for regression using approximate expectation propagation. In International conference on machine learning, pp. 1472-1481, 2016.
|
| 221 |
+
A. Damianou and N.D. Lawrence. Deep Gaussian processes. In International conference on artificial intelligence and statistics, pp. 207-215, 2013.
|
| 222 |
+
A.G. De G. Matthews, M. Van Der Wilk, T. Nickson, K. Fujii, A. Boukouvalas, P. León-Villagrá, Z. Ghahramani, and J. Hensman. Gpflow: A Gaussian process library using tensorflow. The Journal of Machine Learning Research, 18(1):1299-1304, 2017.
|
| 223 |
+
N. Durrande, D. Ginsbourger, and O. Roustant. Additive kernels for gaussian process modeling. arXiv preprint arXiv:1103.4023, 2011.
|
| 224 |
+
D.K. Duvenaud, H. Nickisch, and C.E. Rasmussen. Additive Gaussian processes. In Advances in neural information processing systems, pp. 226-234, 2011.
|
| 225 |
+
A. Esteva, B. Kuprel, R.A. Novoa, J. Ko, S.M. Swetter, H.M. Blau, and S. Thrun. Dermatologist-level classification of skin cancer with deep neural networks. Nature, 542(7639):115-118, 2017.
|
| 226 |
+
D. Flam-Shepherd, J. Requeima, and D. Duvenaud. Mapping Gaussian process priors to Bayesian neural networks. In NIPS Bayesian deep learning workshop, 2017.
|
| 227 |
+
A.Y.K. Foong, Y. Li, J.M. Hernandez-Lobato, and R.E. Turner. 'in-between' uncertainty in Bayesian neural networks. ICML 2019 Workshop on Uncertainty and Robustness in Deep Learning, 2019.
|
| 228 |
+
A.Y.K. Foong, D.R. Burt, Y. Li, and R.E. Turner. On the expressiveness of approximate inference in Bayesian neural networks. In Advances in neural information processing systems, 2020.
|
| 229 |
+
S. Fort, H. Hu, and B. Lakshminarayanan. Deep ensembles: A loss landscape perspective. arXiv preprint arXiv:1912.02757, 2019.
|
| 230 |
+
Y. Gal. Uncertainty in Deep Learning. PhD thesis, University of Cambridge, 2016.
|
| 231 |
+
Y. Gal and Z. Ghahramani. Dropout as a Bayesian approximation: Representing model uncertainty in deep learning. In International conference on machine learning, pp. 1050-1059, 2016.
|
| 232 |
+
X. Glorot and Y. Bengio. Understanding the difficulty of training deep feedforward neural networks. In International conference on artificial intelligence and statistics, pp. 249-256, 2010.
|
| 233 |
+
X. Glorot, A. Bordes, and Y. Bengio. Deep sparse rectifier neural networks. In International conference on artificial intelligence and statistics, pp. 315-323, 2011.
|
| 234 |
+
T. Gneiting and A.E. Raftery. Strictly proper scoring rules, prediction, and estimation. Journal of the American Statistical Association, 102(477):359-378, 2007.
|
| 235 |
+
C. Guo, G. Pleiss, Y. Sun, and K.Q. Weinberger. On calibration of modern neural networks. In International conference on machine learning, pp. 1321-1330, 2017.
|
| 236 |
+
D. Hafner, D. Tran, T. Lillicrap, A. Irpan, and J. Davidson. Noise contrastive priors for functional uncertainty. In Uncertainty in Artificial Intelligence, pp. 905-914, 2020.
|
| 237 |
+
K. He, X. Zhang, S. Ren, and J. Sun. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. In Proceedings of the IEEE international conference on computer vision, pp. 1026-1034, 2015.
|
| 238 |
+
|
| 239 |
+
J. Hensman, N. Fusi, and N.D. Lawrence. Gaussian processes for big data. In Uncertainty in Artificial Intelligence, pp. 282-290, 2013.
|
| 240 |
+
J. Hensman, A.G. De G. Matthews, and Z. Ghahramani. Scalable variational Gaussian process classification. In International conference on artificial intelligence and statistics, pp. 351-360, 2015.
|
| 241 |
+
D. Hernández-Lobato, J.M. Hernández-Lobato, and P. Dupont. Robust multi-class Gaussian process classification. In Advances in neural information processing systems, pp. 280-288, 2011.
|
| 242 |
+
G.E. Hinton and D. Van Camp. Keeping the neural networks simple by minimizing the description length of the weights. In Proceedings of the sixth annual conference on Computational learning theory, pp. 5-13, 1993.
|
| 243 |
+
G.E. Hinton, L. Deng, D. Yu, G.E. Dahl, A.R. Mohamed, N. Jaitly, A. Senior, V. Vanhoucke, P. Nguyen, T.R. Sainath, and B. Kingsbury. Deep neural networks for acoustic modeling in speech recognition: The shared views of four research groups. IEEE Signal processing magazine, 29(6): 82-97, 2012.
|
| 244 |
+
S. Ioffe and C. Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In International conference on machine learning, pp. 448-456, 2015.
|
| 245 |
+
A. Kendall and Y. Gal. What uncertainties do we need in Bayesian deep learning for computer vision? In Advances in neural information processing systems, pp. 5574-5584, 2017.
|
| 246 |
+
D.P. Kingma and J. Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
|
| 247 |
+
D.P. Kingma and M. Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.
|
| 248 |
+
G. Klambauer, T. Unterthiner, A. Mayr, and S. Hochreiter. Self-normalizing neural networks. In Advances in neural information processing systems, pp. 971-980, 2017.
|
| 249 |
+
A. Krizhevsky, I. Sutskever, and G.E. Hinton. Imagenet classification with deep convolutional neural networks. In Advances in neural information processing systems, pp. 1097-1105, 2012.
|
| 250 |
+
B. Lakshminarayanan, A. Pritzel, and C. Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Advances in neural information processing systems, pp. 6402-6413, 2017.
|
| 251 |
+
D.J.C. MacKay. A practical Bayesian framework for backpropagation networks. Neural computation, 4(3):448-472, 1992.
|
| 252 |
+
W.J. Maddox, P. Izmailov, T. Garipov, D.P. Vetrov, and A.G. Wilson. A simple baseline for Bayesian uncertainty in deep learning. In Advances in neural information processing systems, pp. 13132-13143, 2019.
|
| 253 |
+
T. Mikolov, K. Chen, G. Corrado, and J. Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013.
|
| 254 |
+
A. Mobiny, A. Singh, and H. Van Nguyen. Risk-aware machine learning classifier for skin lesion diagnosis. Journal of clinical medicine, 8(8):1241, 2019.
|
| 255 |
+
V. Nair and G.E. Hinton. Rectified linear units improve restricted boltzmann machines. In International conference on machine learning, pp. 807-814, 2010.
|
| 256 |
+
R.M. Neal. Bayesian Learning for Neural Networks. PhD thesis, University of Toronto, 1995.
|
| 257 |
+
A. Nguyen, J. Yosinski, and J. Clune. Deep neural networks are easily fooled: High confidence predictions for unrecognizable images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 427-436, 2015.
|
| 258 |
+
|
| 259 |
+
K. Osawa, S. Swaroop, M.E.E. Khan, A. Jain, R. Eschenhagen, R.E. Turner, and R. Yokota. Practical deep learning with Bayesian principles. In Advances in neural information processing systems, pp. 4289-4301, 2019.
|
| 260 |
+
T. Pearce, R. Tsuchida, M. Zaki, A. Brintrup, and A. Neely. Expressive priors in Bayesian neural networks: Kernel combinations and periodic functions. In Uncertainty in Artificial Intelligence, 2019.
|
| 261 |
+
J. Postels, F. Ferroni, H. Coskun, N. Navab, and F. Tombari. Sampling-free epistemic uncertainty estimation using approximated variance propagation. In Proceedings of the IEEE international conference on computer vision, pp. 2931-2940, 2019.
|
| 262 |
+
J. Ren, P.J. Liu, E. Fertig, J. Snoek, R. Poplin, M. Depristo, J. Dillon, and B. Lakshminarayanan. Likelihood ratios for out-of-distribution detection. In Advances in neural information processing systems, pp. 14680-14691, 2019.
|
| 263 |
+
T. Salimans and D.P. Kingma. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. In Advances in neural information processing systems, pp. 901-909, 2016.
|
| 264 |
+
H. Salimbeni and M. Deisenroth. Doubly stochastic variational inference for deep Gaussian processes. In Advances in neural information processing systems, pp. 4588-4599, 2017.
|
| 265 |
+
A. Shekhotsov and B. Flach. Feed-forward propagation in probabilistic neural networks with categorical and max layers. In International conference on learning representations, 2018.
|
| 266 |
+
J. Shi, S. Sun, and J. Zhu. A spectral approach to gradient estimation for implicit distributions. In International conference on machine learning, pp. 4644-4653, 2018.
|
| 267 |
+
J. Snoek, Y. Ovadia, E. Fertig, B. Lakshminarayanan, S. Nowozin, D. Sculley, J. Dillon, J. Ren, and Z. Nado. Can you trust your model's uncertainty? evaluating predictive uncertainty under dataset shift. In Advances in neural information processing systems, pp. 13969-13980, 2019.
|
| 268 |
+
S. Sun, G. Zhang, J. Shi, and R. Grosse. Functional variational Bayesian neural networks. In International conference on learning representations, 2019.
|
| 269 |
+
M. Titsias. Variational learning of inducing variables in sparse Gaussian processes. In International conference on artificial intelligence and statistics, pp. 567-574, 2009.
|
| 270 |
+
S. Urban and P. van der Smagt. Gaussian process neurons. https://openreview.net/forum?id=By-IifzRW, 2018. Accessed: 2020-05-15.
|
| 271 |
+
H. Wang, X. Shi, and D.Y. Yeung. Natural-parameter networks: A class of probabilistic neural networks. Advances in neural information processing systems, pp. 118-126, 2016.
|
| 272 |
+
S. Wang and C. Manning. Fast dropout training. In International conference on machine learning, pp. 118-126, 2013.
|
| 273 |
+
F. Wenzel, K. Roth, B.S. Veeling, J. Światkowski, L. Tran, S. Mandt, J. Snoek, T. Salimans, R. Jenatton, and S. Nowozin. How good is the bayes posterior in deep neural networks really? arXiv preprint arXiv:2002.02405, 2020.
|
| 274 |
+
C.K.I. Williams and C.E. Rasmussen. Gaussian processes for machine learning, volume 2. MIT press Cambridge, MA, 2006.
|
| 275 |
+
A. Wu, S. Nowozin, E. Meeds, R.E. Turner, Hernández-Lobato J.M., and A.L. Gaunt. Deterministic variational inference for robust bayesian neural networks. In International Conference on Learning Representations, 2019.
|
| 276 |
+
J. Yao, W. Pan, S. Ghosh, and F. Doshi-Velez. Quality of uncertainty quantification for bayesian neural network inference. arXiv preprint arXiv:1906.09686, 2019.
|
| 277 |
+
|
| 278 |
+
# A PRACTICAL SPECIFICATIONS FOR AUNN
|
| 279 |
+
|
| 280 |
+
Whitening transformation for $\mathrm{q}(\mathbf{u}_d^l)$ . The proposed parametric posterior for each unit is given by the Gaussian $\mathrm{q}(\mathbf{u}_d^l) = \mathcal{N}(\mathbf{u}_d^l|\mathbf{m}_d^l,\mathbf{S}_d^l)$ . The GP prior on $\mathbf{u}_d^l$ is $\mathrm{p}(\mathbf{u}_d^l) = \mathcal{N}(\mathbf{u}_d^l|\boldsymbol{\mu}_d^l,\mathbf{K}_d^l)$ , with $\boldsymbol{\mu}_d^l = \mu_d^l (\mathbf{z}_d^l)$ and $\mathbf{K}_d^l = k_d^l (\mathbf{z}_d^l,\mathbf{z}_d^l)$ . For numerical stability and to reduce the amount of operations, we use a white representation for $\mathrm{q}(\mathbf{u}_d^l)$ , as is common practice in (D)GPs (De G. Matthews et al., 2017; Salimbeni & Deisenroth, 2017). That is, we consider the variable $\mathbf{v}_d^l\sim \mathcal{N}(\tilde{\mathbf{m}}_d^l,\tilde{\mathbf{S}}_d^l)$ , with $\mathbf{u}_d^l = \boldsymbol{\mu}_d^l +(\mathbf{K}_d^l)^{1 / 2}\mathbf{v}_d^l$ . Specifically, in the code the variable $\tilde{\mathbf{m}}_d^l$ is denoted as $\mathrm{q\_mu}$ , and $\tilde{\mathbf{S}}_d^l$ is represented through its Cholesky factorization $(\tilde{\mathbf{S}}_d^l)^{1 / 2}$ , which is named $\mathrm{q\_sqrt}$ .
|
| 281 |
+
|
| 282 |
+
Initialization of the variational parameters $\{\mathbf{m}_d^l\}$ . These are the mean of the posterior distribution on the inducing points. Therefore, their value determines the initialization of the activation function. If the RBF kernel is used, $\{\mathbf{m}_d^l\}$ are initialized to the prior $\pmb{\mu}_d^l = \mu_d^l (\mathbf{z}_d^l)$ (since we are using the aforementioned white representation, $\mathrm{q\_mu}$ is initialized to zero). This is the most standard initialization in GP literature. For the TRI kernel, $\{\mathbf{m}_d^l\}$ are initialized according to the ReLu which TRI is inspired by, i.e. $\mathbf{m}_d^l = \mathrm{ReLu}(\mathbf{z}_d^l)$ .
|
| 283 |
+
|
| 284 |
+
Initialization of the variational parameters $\{\mathbf{S}_d^l\}$ . The posterior distribution covariance matrices are initialized to the prior $\mathbf{K}_d^l = k_d^l (\mathbf{z}_d^l,\mathbf{z}_d^l)$ (that is, $\mathrm{q\_sqrt}$ is initialized to the identity matrix). Following common practice for DGPs (Salimbeni & Deisenroth, 2017), the covariance matrices of inner layers are scaled by $10^{-5}$ .
|
| 285 |
+
|
| 286 |
+
Initialization of the weights. The Glorot uniform initializer (Glorot & Bengio, 2010), also called Xavier uniform initializer, is used for the weights. The biases are initialized to zero.
|
| 287 |
+
|
| 288 |
+
Initialization of the kernel hyperparameters. The kernels used (RBF and TRI) have two hyperparameters: the variance $\gamma$ and the lengthscale $\ell$ . Both are always initialized to 1 (except for the lengthscale in the 1D example in Section 3.1, where $\ell$ is initialized to 0.1).
|
| 289 |
+
|
| 290 |
+
Initialization of the inducing points. In order to initialize $\mathbf{z}_d^l$ , the $N$ input data points are propagated through the network with the aforementioned initial weights, biases, and activation function. Then, in each layer and unit, $\mathbf{z}_d^l$ is initialized with a linspace between the minimum and maximum of the $N$ values there (the minimum (resp. the maximum) is decreased (resp. increased) by 0.1 to strictly contain the interval of interest).
|
| 291 |
+
|
| 292 |
+
Initialization of the regression likelihood noise. In the regression problems, we use a Gaussian likelihood $\mathrm{p}(y|f) = \mathcal{N}(y|f,\sigma^2)$ . The standard deviation of the noise is initialized to $\sigma = 0.1$ .
|
| 293 |
+
|
| 294 |
+
Mean function. We always use a zero mean function. Since data is normalized to have zero mean (and standard deviation equal to one), a zero mean function allows for reverting to the empirical mean for OOD data, as explained in the main text.
|
| 295 |
+
|
| 296 |
+
Optimizer and learning rate. Throughout the work, we use the Adam Optimizer (Kingma & Ba, 2014) with default parameters and learning rate of 0.001.
|
| 297 |
+
|
| 298 |
+
# B EXPERIMENTAL DETAILS FOR THE EXPERIMENTS
|
| 299 |
+
|
| 300 |
+
All the experiments were run on a NVIDIA Tesla P100. In order to predict, all the methods utilize 100 test samples in all the experiments. Details for each section are provided below.
|
| 301 |
+
|
| 302 |
+
An illustrative example (Section 3.1 in the main text). All the methods use two layers (i.e. one hidden layer). The hidden layer has $D = 25$ units in all cases. BNN and fBNN use ReLu activations. The auNN methods use $M = 10$ inducing points in each unit (the rest of methods do not have such inducing points). The methods are trained during 5000 epochs with the whole dataset (no mini-batches). The dataset is synthetically generated to have two clusters of points around $x = \pm 1$ . More specifically, 30 points are sampled uniformly in each interval $(x - 0.3, x + 0.3)$ for $x = \pm 1$ , and the output is given by the sin function plus a Gaussian noise of standard deviation 0.1. We have also trained DGP and GP on this dataset, see Figure 14. Both methods use $M = 10$ inducing points, and are trained during 5000 epochs with the whole dataset (no mini-batches). DGP has one hidden layer with $D = 25$ units.
|
| 303 |
+
|
| 304 |
+

|
| 305 |
+
Figure 7: Performance of the compared methods in the gap splits for six UCI datasets. Mean and one standard error of NLL (upper row) and RMSE (lower row) are shown, the lower the better.
|
| 306 |
+
|
| 307 |
+
UCI regression datasets with gap (and standard) splits (Section 3.2 in the main text). The methods use $L = 2,3$ layers. In all cases, the hidden layers have $D = 50$ units. BNN and fBNN use ReLu activations. The methods are trained during 10000 epochs, with a mini-batch size that depends on the size of the dataset. For those with fewer than 5000 instances (i.e. Boston, Concrete, Energy, Wine and Yacht), the mini-batch size is 500. For those with more than 5000 (i.e. Naval), the mini-batch size is 5000. Recall from the main text that each dataset has as many gap splits as dimensionality, with 2/3 for train and 1/3 for test. In the case of standard splits, each dataset uses 10 random $90\% -10\%$ train-test splits. Regarding the segment used in Figure 5, each extreme of the segment is a point from a different connected component of the training set. These are chosen so that the function is well-known in the extremes (but not along the segment, which crosses the gap). Namely, the extremes are chosen as the training points who have minimum average distance to the closest five points in its connected component.
|
| 308 |
+
|
| 309 |
+
Comparison with DGPs (Section 3.3 in the main text). Here, different values of depth $L$ , number of inducing points $M$ and number of hidden layers $D$ are studied (see the main text). auNN is trained during 5000 epochs, with a mini-batch size of 5000 (20000 epochs are used for DGP, as proposed by the authors (Salimbeni & Deisenroth, 2017)). Each experiment is repeated on five random $90\% - 10\%$ train-test splits. DGP uses a RBF kernel. The experimental details for DGP-add are the same as for DGP, with the only difference of the kernel. Namely, an additive kernel using RBF components is used for DGP-add.
|
| 310 |
+
|
| 311 |
+
Large scale experiments (Section 3.4 in the main text). Since we are dealing with classification datasets, a Robust-Max likelihood is used in all cases (Hernández-Lobato et al., 2011). The values of $D$ and $M$ are chosen following the conclusions from Section 3.3. That is, DGP needs large $M$ (the largest $M = 100$ is used), but is less influenced by $D$ (this is chosen as recommended by the authors (Salimbeni & Deisenroth, 2017): $D = \min(30, D^0)$ , with $D^0$ the dimensionality of the input data). auNN needs large $D$ (the largest $D = 50$ is used), but is less influenced by $M$ (the intermediate value $M = 25$ is chosen). All the methods are trained during 100 epochs, with a mini-batch size of 5000. Three random train-test splits are used. In both datasets, 500000 instances are used for test (which leaves 10.5M and 4.5M training instances for HIGGS and SUSY, respectively).
|
| 312 |
+
|
| 313 |
+
# C ADDITIONAL FIGURES AND TABLES
|
| 314 |
+
|
| 315 |
+
Finally, additional material is provided here. Every figure and table is referred from the main text.
|
| 316 |
+
|
| 317 |
+
Table 4: Test NLL for the gap splits of the six UCI datasets (mean and one standard error, the lower the better). Last column is the per-group (weight-space stochasticity vs activation-level stochasticity) average rank.
|
| 318 |
+
|
| 319 |
+
<table><tr><td></td><td>Boston</td><td>Concrete</td><td>Energy</td><td>Naval</td><td>Wine</td><td>Yacht</td><td>Rank</td><td>Rank (group)</td></tr><tr><td>BNN-2</td><td>3.29±0.10</td><td>3.58±0.09</td><td>114.84±70.69</td><td>2186.30±464.32</td><td>0.96±0.01</td><td>1.54±0.09</td><td>3.92±0.79</td><td rowspan="4">4.83±0.32</td></tr><tr><td>BNN-3</td><td>3.54±0.03</td><td>4.23±0.04</td><td>30.91±19.97</td><td>618.44±147.99</td><td>0.98±0.02</td><td>4.10±0.03</td><td>4.98±0.70</td></tr><tr><td>fBNN-2</td><td>3.67±0.25</td><td>4.60±0.39</td><td>111.65±69.68</td><td>1050.65±192.61</td><td>2.80±0.31</td><td>1.77±0.12</td><td>5.04±0.36</td></tr><tr><td>fBNN-3</td><td>3.69±0.24</td><td>4.49±0.34</td><td>93.92±56.45</td><td>1060.54±247.21</td><td>198.76±30.24</td><td>1.47±0.15</td><td>5.36±0.50</td></tr><tr><td>auNN-RBF-2</td><td>5.19±0.47</td><td>4.27±0.26</td><td>39.93±20.89</td><td>379.55±67.74</td><td>1.44±0.05</td><td>1.68±0.35</td><td>4.69±0.61</td><td rowspan="4">4.17±0.40</td></tr><tr><td>auNN-RBF-3</td><td>5.68±0.75</td><td>5.54±0.40</td><td>50.48±28.26</td><td>352.94±72.13</td><td>16.05±1.13</td><td>1.28±0.23</td><td>5.29±0.89</td></tr><tr><td>auNN-TRI-2</td><td>2.77±0.06</td><td>3.45±0.06</td><td>3.99±1.14</td><td>30.47±5.54</td><td>1.06±0.03</td><td>2.34±0.03</td><td>3.25±0.57</td></tr><tr><td>auNN-TRI-3</td><td>2.70±0.04</td><td>3.39±0.06</td><td>5.50±2.45</td><td>2.38±3.23</td><td>1.23±0.04</td><td>2.68±0.30</td><td>3.47±0.80</td></tr></table>
|
| 320 |
+
|
| 321 |
+
Table 5: Test RMSE for the gap splits of the six UCI datasets (mean and one standard error, the lower the better). Last column is the per-group (weight-space stochasticity vs activation-level stochasticity) average rank.
|
| 322 |
+
|
| 323 |
+
<table><tr><td></td><td>Boston</td><td>Concrete</td><td>Energy</td><td>Naval</td><td>Wine</td><td>Yacht</td><td>Rank</td><td>Rank (group)</td></tr><tr><td>BNN-2</td><td>6.54±0.56</td><td>7.62±0.35</td><td>4.23±1.91</td><td>0.03±0.00</td><td>0.63±0.01</td><td>1.18±0.11</td><td>4.09±0.67</td><td rowspan="4">4.91±0.37</td></tr><tr><td>BNN-3</td><td>7.77±0.40</td><td>16.33±0.67</td><td>5.27±1.41</td><td>0.02±0.00</td><td>0.64±0.01</td><td>14.31±0.76</td><td>6.15±0.91</td></tr><tr><td>fBNN-2</td><td>3.75±0.21</td><td>7.58±0.41</td><td>3.95±1.82</td><td>0.03±0.00</td><td>0.78±0.02</td><td>1.25±0.08</td><td>4.70±0.54</td></tr><tr><td>fBNN-3</td><td>3.81±0.20</td><td>7.52±0.36</td><td>4.48±1.79</td><td>0.03±0.00</td><td>0.87±0.04</td><td>1.13±0.12</td><td>4.71±0.53</td></tr><tr><td>auNN-RBF-2</td><td>4.90±0.47</td><td>7.81±0.47</td><td>3.41±1.46</td><td>0.03±0.00</td><td>0.72±0.01</td><td>0.99±0.18</td><td>4.32±0.47</td><td rowspan="4">4.09±0.23</td></tr><tr><td>auNN-RBF-3</td><td>4.27±0.29</td><td>7.74±0.21</td><td>2.72±1.03</td><td>0.03±0.00</td><td>0.82±0.01</td><td>1.03±0.14</td><td>4.27±0.55</td></tr><tr><td>auNN-TRI-2</td><td>4.01±0.30</td><td>7.44±0.38</td><td>2.72±0.79</td><td>0.02±0.00</td><td>0.67±0.01</td><td>1.51±0.20</td><td>3.90±0.33</td></tr><tr><td>auNN-TRI-3</td><td>3.78±0.19</td><td>7.03±0.23</td><td>3.36±1.23</td><td>0.02±0.00</td><td>0.68±0.01</td><td>3.80±2.41</td><td>3.85±0.46</td></tr></table>
|
| 324 |
+
|
| 325 |
+
Table 6: Test NLL for the standard splits of the six UCI datasets (mean and one standard error, the lower the better). Last column is the per-group (weight-space stochasticity vs activation-level stochasticity) average rank.
|
| 326 |
+
|
| 327 |
+
<table><tr><td>test NLL</td><td>Boston</td><td>Concrete</td><td>Energy</td><td>Naval</td><td>Wine</td><td>Yacht</td><td>Rank</td><td>Rank (group)</td></tr><tr><td>BNN-2</td><td>2.71±0.07</td><td>3.12±0.02</td><td>0.65±0.04</td><td>-5.38±0.59</td><td>0.99±0.02</td><td>1.01±0.07</td><td>3.78±0.41</td><td></td></tr><tr><td>BNN-3</td><td>3.62±0.05</td><td>4.24±0.01</td><td>0.80±0.03</td><td>-5.02±0.33</td><td>1.01±0.02</td><td>4.06±0.05</td><td>6.25±0.70</td><td rowspan="3">4.5±0.39</td></tr><tr><td>fBNN-2</td><td>2.83±0.20</td><td>3.20±0.04</td><td>0.67±0.04</td><td>-6.17±0.02</td><td>1.55±0.08</td><td>0.77±0.02</td><td>4.13±0.57</td></tr><tr><td>fBNN-3</td><td>2.75±0.14</td><td>3.13±0.05</td><td>0.65±0.03</td><td>-6.26±0.00</td><td>207.43±9.12</td><td>0.79±0.02</td><td>3.83±0.85</td></tr><tr><td>auNN-RBF-2</td><td>3.38±0.30</td><td>3.14±0.05</td><td>0.63±0.03</td><td>-5.40±0.08</td><td>1.16±0.06</td><td>0.52±0.04</td><td>3.97±0.60</td><td></td></tr><tr><td>auNN-RBF-3</td><td>3.89±0.47</td><td>3.25±0.13</td><td>0.53±0.07</td><td>-5.69±0.03</td><td>8.98±1.51</td><td>0.54±0.03</td><td>4.42±0.85</td><td rowspan="3">4.5±0.43</td></tr><tr><td>auNN-TRI-2</td><td>2.56±0.05</td><td>3.08±0.02</td><td>1.47±0.04</td><td>-4.81±0.07</td><td>0.96±0.03</td><td>2.25±0.02</td><td>4.78±0.92</td></tr><tr><td>auNN-TRI-3</td><td>2.50±0.02</td><td>2.98±0.02</td><td>1.42±0.02</td><td>-3.43±0.32</td><td>1.10±0.07</td><td>2.26±0.01</td><td>4.83±1.01</td></tr></table>
|
| 328 |
+
|
| 329 |
+
Table 7: Test RMSE for the standard splits of the six UCI datasets (mean and one standard error, the lower the better). Last column is the per-group (weight-space stochasticity vs activation-level stochasticity) average rank.
|
| 330 |
+
|
| 331 |
+
<table><tr><td>test RMSE</td><td>Boston</td><td>Concrete</td><td>Energy</td><td>Naval</td><td>Wine</td><td>Yacht</td><td>Rank</td><td>Rank (group)</td></tr><tr><td>BNN-2</td><td>3.47±0.34</td><td>5.49±0.13</td><td>0.45±0.02</td><td>0.00±0.00</td><td>0.65±0.01</td><td>0.68±0.08</td><td>4.70±0.48</td><td rowspan="4">4.59±0.41</td></tr><tr><td>BNN-3</td><td>8.89±0.45</td><td>16.71±0.20</td><td>0.51±0.02</td><td>0.00±0.00</td><td>0.67±0.02</td><td>13.49±0.94</td><td>6.50±0.64</td></tr><tr><td>fBNN-2</td><td>2.80±0.21</td><td>5.34±0.13</td><td>0.47±0.02</td><td>0.00±0.00</td><td>0.70±0.02</td><td>0.33±0.04</td><td>3.70±0.61</td></tr><tr><td>fBNN-3</td><td>2.74±0.16</td><td>5.07±0.12</td><td>0.46±0.02</td><td>0.00±0.00</td><td>0.83±0.02</td><td>0.36±0.04</td><td>3.45±0.88</td></tr><tr><td>auNN-RBF-2</td><td>3.16±0.23</td><td>5.13±0.16</td><td>0.45±0.02</td><td>0.00±0.00</td><td>0.67±0.02</td><td>0.41±0.04</td><td>4.25±0.35</td><td rowspan="4">4.41±0.41</td></tr><tr><td>auNN-RBF-3</td><td>3.01±0.25</td><td>4.51±0.18</td><td>0.41±0.03</td><td>0.00±0.00</td><td>0.76±0.02</td><td>0.38±0.03</td><td>3.35±0.77</td></tr><tr><td>auNN-TRI-2</td><td>3.00±0.26</td><td>5.21±0.10</td><td>0.72±0.02</td><td>0.00±0.00</td><td>0.62±0.02</td><td>1.15±0.14</td><td>5.40±0.80</td></tr><tr><td>auNN-TRI-3</td><td>2.81±0.17</td><td>4.67±0.15</td><td>0.65±0.03</td><td>0.01±0.00</td><td>0.62±0.02</td><td>1.16±0.15</td><td>4.65±1.00</td></tr></table>
|
| 332 |
+
|
| 333 |
+
Table 8: Standard error obtained by auNN and DGP in three splits of the large scale classification datasets HIGGS and SUSY.
|
| 334 |
+
|
| 335 |
+
<table><tr><td rowspan="2"></td><td rowspan="2">N</td><td rowspan="2">D</td><td colspan="6">auNN</td><td colspan="3">DGP</td></tr><tr><td>RBF-2</td><td>RBF-3</td><td>RBF-4</td><td>TRI-2</td><td>TRI-3</td><td>TRI-4</td><td>DGP-2</td><td>DGP-3</td><td>DGP-4</td></tr><tr><td>HIGGS</td><td>11M</td><td>28</td><td>0.0001</td><td>0.0006</td><td>0.0007</td><td>0.0003</td><td>0.0004</td><td>0.0008</td><td>0.0005</td><td>0.0009</td><td>0.0010</td></tr><tr><td>SUSY</td><td>5M</td><td>18</td><td>0.0004</td><td>0.0005</td><td>0.0005</td><td>0.0005</td><td>0.0005</td><td>0.0004</td><td>0.0005</td><td>0.0027</td><td>0.0035</td></tr></table>
|
| 336 |
+
|
| 337 |
+
<table><tr><td colspan="3">auNN-RBF</td><td colspan="3">auNN-TRI</td></tr><tr><td>Layer 1</td><td>Layer 2</td><td>Layer 3</td><td>Layer 1</td><td>Layer 2</td><td>Layer 3</td></tr><tr><td>0 -5 0 5</td><td>0 -5 0 5</td><td>0 -5 0 5</td><td>0.0 -5 0 5</td><td>0.0 -5 0 5</td><td>0.0 -5 0 5</td></tr><tr><td>1 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>1 -5 0 5</td><td>1 -5 0 5</td></tr><tr><td>1 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2 -5 0 5</td><td>2 -5 0 5</td></tr><tr><td>1 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2 -5 0 5</td><td>2 -5 0 5</td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2.5 -5 0 5</td><td>2.5 -5 0 5</td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2.5 -5 0 5</td><td>2.5 -5 0 5</td></tr><tr><td>0 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2 -5 0 5</td><td>2 -5 0 5</td></tr><tr><td>0 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>2 -5 0 5</td><td>2 -5 0 5</td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>0.5 -5 0 5</td><td>0.5 -5 0 5</td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>0.5 -5 0 5</td><td>0.5 -5 0 5</td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>0.5 -5 0 5</td><td></td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>0.5 -5 0 5</td><td></td></tr><tr><td>0.5 -5 0 5</td><td>1 -5 0 5</td><td></td><td>0.0 -5 0 5</td><td>0.5 -5 0 5</td><td></td></tr><tr><td>1 -5 0 5</td><td>0.5 -5 0 5</td><td></td><td>0.25 -0.25 -0.50 -0.5 -0.5</td><td>0.0 -0.5 -0.5</td><td>0.0 -0.5 -0.5</td></tr><tr><td>1 -5 0 5</td><td>0.5 -5 0 5</td><td></td><td>4 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -2 -</td><td></td><td></td></tr></table>
|
| 338 |
+
|
| 339 |
+
Figure 8: A complete example of the activation functions learned by auNN with RBF and TRI kernels. These were obtained for the Energy dataset with the first gap split, using three layers, 10 hidden units per (hidden) layer, and 5 inducing points in each unit. Whereas auNN-RBF learns smoother activations, auNN-TRI ones are piecewise linear, inspired by the ReLU. Notice that auNN allows units to switch off if they are not required. Black dots represent the five inducing points in each unit. Green points are the locations of the input data when propagated to the corresponding unit.
|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
Figure 9: Test RMSE of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units). Results are the average over 5 independent runs on the UCI Kin8 dataset. The lower the better. Whereas DGP improves "by columns" (i.e. with $M$ ), auNN does it "by rows" (i.e. with $D$ ). This is as theoretically expected, and it is convenient from a scalability viewpoint.
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
Figure 10: Test RMSE with increasing depth $(L = 2,3,4)$ . This supports that auNN might benefit more than DGP from deeper networks. Moreover, the aforementioned different influence of $M$ and $D$ on DGP and auNN is confirmed here.
|
| 346 |
+
|
| 347 |
+
Table 9: Test NLL of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units). Mean and one standard error over 5 independent runs on the UCI Kin8 dataset are shown. The lower the better.
|
| 348 |
+
|
| 349 |
+
<table><tr><td></td><td colspan="6">auNN-RBF</td><td colspan="6">auNN-TRI</td><td colspan="6">DGP</td><td></td></tr><tr><td>D</td><td>M</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td></tr><tr><td>5</td><td></td><td>-0.85±0.01</td><td>-0.89±0.01</td><td>-0.89±0.01</td><td>-0.89±0.01</td><td>-0.89±0.01</td><td>-0.90±0.01</td><td>-0.78±0.00</td><td>-0.79±0.03</td><td>-0.78±0.05</td><td>-0.77±0.04</td><td>-0.67±0.06</td><td>-0.71±0.04</td><td>-0.67±0.01</td><td>-0.98±0.00</td><td>-1.19±0.01</td><td>-1.30±0.01</td><td>-1.33±0.01</td><td>-1.34±0.01</td></tr><tr><td>10</td><td></td><td>-1.06±0.01</td><td>-1.09±0.01</td><td>-1.09±0.01</td><td>-1.09±0.01</td><td>-1.10±0.02</td><td>-1.10±0.01</td><td>-0.96±0.01</td><td>-1.02±0.01</td><td>-1.03±0.01</td><td>-0.98±0.03</td><td>-0.94±0.03</td><td>-0.89±0.03</td><td>-0.69±0.01</td><td>-0.98±0.00</td><td>-1.19±0.00</td><td>-1.30±0.01</td><td>-1.33±0.01</td><td>-1.35±0.01</td></tr><tr><td>25</td><td></td><td>-1.27±0.02</td><td>-1.30±0.02</td><td>-1.30±0.02</td><td>-1.30±0.02</td><td>-1.31±0.01</td><td>-1.31±0.02</td><td>-1.09±0.01</td><td>-1.19±0.01</td><td>-1.22±0.01</td><td>-1.15±0.02</td><td>-1.11±0.01</td><td>-1.06±0.03</td><td>-0.68±0.01</td><td>-0.98±0.00</td><td>-1.17±0.01</td><td>-1.26±0.01</td><td>-1.29±0.01</td><td>-1.30±0.01</td></tr><tr><td>50</td><td></td><td>-1.33±0.01</td><td>-1.34±0.01</td><td>-1.34±0.02</td><td>-1.33±0.01</td><td>-1.34±0.02</td><td>-1.32±0.03</td><td>-1.15±0.01</td><td>-1.24±0.01</td><td>-1.29±0.01</td><td>-1.26±0.01</td><td>-1.24±0.02</td><td>-1.19±0.02</td><td>-0.69±0.01</td><td>-0.96±0.01</td><td>-1.16±0.01</td><td>-1.21±0.01</td><td>-1.22±0.01</td><td>-1.24±0.01</td></tr></table>
|
| 350 |
+
|
| 351 |
+
Table 10: Test RMSE of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units). Mean and one standard error over 5 independent runs on the UCI Kin8 dataset are shown. The lower the better.
|
| 352 |
+
|
| 353 |
+
<table><tr><td></td><td colspan="6">auNN-RBF</td><td colspan="6">auNN-TRI</td><td colspan="5">DGP</td><td></td><td></td></tr><tr><td>D</td><td>M</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td></tr><tr><td>5</td><td></td><td>0.10±0.00</td><td>0.10±0.00</td><td>0.10±0.00</td><td>0.10±0.00</td><td>0.10±0.00</td><td>0.10±0.00</td><td>0.11±0.00</td><td>0.11±0.00</td><td>0.11±0.01</td><td>0.11±0.00</td><td>0.12±0.01</td><td>0.12±0.00</td><td>0.12±0.00</td><td>0.09±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td></tr><tr><td>10</td><td></td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.09±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.09±0.00</td><td>0.09±0.00</td><td>0.10±0.00</td><td>0.12±0.00</td><td>0.09±0.00</td><td>0.07±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td></tr><tr><td>25</td><td></td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.08±0.00</td><td>0.12±0.00</td><td>0.09±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.06±0.00</td></tr><tr><td>50</td><td></td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.06±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.12±0.00</td><td>0.09±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td><td>0.07±0.00</td></tr></table>
|
| 354 |
+
|
| 355 |
+
Table 11: Test NLL of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units) as the depth increases from $L = 2$ to $L = 4$ . Mean and one standard error over 5 independent runs on the UCI Power dataset are shown. The lower the better.
|
| 356 |
+
|
| 357 |
+
<table><tr><td rowspan="2">L</td><td rowspan="2">D</td><td rowspan="2">M</td><td colspan="6">auNN-RBF</td><td colspan="6">auNN-TRI</td><td colspan="6">DGP</td></tr><tr><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td></tr><tr><td rowspan="4">2</td><td>5</td><td></td><td>2.86±0.02</td><td>2.84±0.02</td><td>2.84±0.02</td><td>2.84±0.02</td><td>2.84±0.02</td><td>2.84±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.84±0.02</td><td>2.89±0.03</td><td>2.84±0.02</td><td>2.87±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td></tr><tr><td>10</td><td></td><td>2.84±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.84±0.02</td><td>2.82±0.02</td><td>2.80±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.87±0.02</td><td>2.86±0.02</td><td>2.83±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td>25</td><td></td><td>2.83±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.83±0.02</td><td>2.80±0.02</td><td>2.78±0.02</td><td>2.78±0.02</td><td>2.78±0.02</td><td>2.79±0.02</td><td>2.87±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td></tr><tr><td>50</td><td></td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.80±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.82±0.02</td><td>2.80±0.02</td><td>2.77±0.02</td><td>2.76±0.02</td><td>2.76±0.02</td><td>2.76±0.03</td><td>2.86±0.02</td><td>2.87±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td 
rowspan="4">3</td><td>5</td><td></td><td>2.84±0.02</td><td>2.83±0.02</td><td>2.83±0.02</td><td>2.83±0.03</td><td>2.83±0.02</td><td>2.83±0.02</td><td>2.84±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.82±0.02</td><td>2.85±0.02</td><td>2.82±0.02</td><td>2.86±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.79±0.02</td><td>2.78±0.02</td><td>2.77±0.01</td></tr><tr><td>10</td><td></td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td><td>2.80±0.02</td><td>2.82±0.02</td><td>2.80±0.02</td><td>2.79±0.02</td><td>2.79±0.02</td><td>2.78±0.02</td><td>2.78±0.02</td><td>2.86±0.02</td><td>2.83±0.02</td><td>2.80±0.02</td><td>2.81±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td>25</td><td></td><td>2.80±0.02</td><td>2.79±0.02</td><td>2.77±0.02</td><td>2.77±0.02</td><td>2.77±0.02</td><td>2.77±0.02</td><td>2.79±0.02</td><td>2.77±0.02</td><td>2.74±0.02</td><td>2.72±0.02</td><td>2.74±0.03</td><td>2.74±0.03</td><td>2.86±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.81±0.02</td></tr><tr><td>50</td><td></td><td>2.78±0.02</td><td>2.78±0.02</td><td>2.77±0.02</td><td>2.76±0.02</td><td>2.76±0.02</td><td>2.76±0.03</td><td>2.78±0.02</td><td>2.75±0.02</td><td>2.71±0.02</td><td>2.71±0.03</td><td>2.70±0.03</td><td>2.70±0.02</td><td>2.87±0.02</td><td>2.87±0.02</td><td>2.84±0.02</td><td>2.82±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td 
rowspan="4">4</td><td>5</td><td></td><td>2.84±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.83±0.02</td><td>2.82±0.01</td><td>2.83±0.02</td><td>3.69±0.35</td><td>2.83±0.01</td><td>2.83±0.02</td><td>2.83±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.86±0.02</td><td>2.83±0.02</td><td>2.80±0.02</td><td>2.79±0.02</td><td>2.78±0.02</td><td>2.77±0.02</td></tr><tr><td>10</td><td></td><td>2.81±0.02</td><td>2.80±0.02</td><td>2.80±0.02</td><td>2.80±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td><td>2.83±0.02</td><td>2.81±0.01</td><td>2.79±0.01</td><td>2.79±0.02</td><td>2.79±0.02</td><td>2.79±0.02</td><td>2.86±0.02</td><td>2.84±0.02</td><td>2.83±0.02</td><td>2.79±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td>25</td><td></td><td>2.79±0.02</td><td>2.78±0.02</td><td>2.77±0.02</td><td>2.75±0.02</td><td>2.77±0.02</td><td>2.76±0.02</td><td>2.80±0.01</td><td>2.78±0.02</td><td>2.75±0.02</td><td>2.74±0.02</td><td>2.75±0.03</td><td>2.75±0.02</td><td>2.86±0.02</td><td>2.85±0.02</td><td>2.83±0.02</td><td>2.82±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr><tr><td>50</td><td></td><td>2.79±0.02</td><td>2.80±0.02</td><td>2.75±0.03</td><td>2.77±0.03</td><td>2.75±0.03</td><td>2.74±0.03</td><td>2.79±0.01</td><td>2.77±0.01</td><td>2.73±0.02</td><td>2.74±0.02</td><td>2.74±0.02</td><td>2.75±0.02</td><td>2.87±0.02</td><td>2.85±0.02</td><td>2.84±0.02</td><td>2.82±0.02</td><td>2.82±0.02</td><td>2.81±0.02</td></tr></table>
|
| 358 |
+
|
| 359 |
+
Table 12: Test RMSE of auNN and DGP for different values of $M$ (number of inducing points) and $D$ (number of hidden units) as the depth increases from $L = 2$ to $L = 4$ . Mean and one standard error over 5 independent runs on the UCI Power dataset are shown. The lower the better.
|
| 360 |
+
|
| 361 |
+
<table><tr><td rowspan="2">L</td><td rowspan="2">D</td><td rowspan="2">M</td><td colspan="6">auNN-RBF</td><td colspan="6">auNN-TRI</td><td colspan="6">DGP</td></tr><tr><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td><td>5</td><td>10</td><td>25</td><td>50</td><td>75</td><td>100</td></tr><tr><td rowspan="4">2</td><td>5</td><td>4.20±0.09</td><td>4.14±0.08</td><td>4.12±0.08</td><td>4.13±0.08</td><td>4.13±0.07</td><td>4.14±0.09</td><td>4.16±0.08</td><td>4.09±0.08</td><td>4.06±0.09</td><td>4.12±0.09</td><td>4.32±0.13</td><td>4.12±0.09</td><td>4.24±0.10</td><td>4.19±0.09</td><td>4.08±0.09</td><td>4.05±0.08</td><td>4.03±0.07</td><td>4.01±0.07</td><td></td></tr><tr><td>10</td><td>4.15±0.09</td><td>4.08±0.08</td><td>4.03±0.08</td><td>4.03±0.07</td><td>4.03±0.09</td><td>4.03±0.09</td><td>4.10±0.10</td><td>4.03±0.08</td><td>3.99±0.08</td><td>4.01±0.08</td><td>4.03±0.09</td><td>4.03±0.07</td><td>4.24±0.10</td><td>4.21±0.09</td><td>4.10±0.08</td><td>4.08±0.08</td><td>4.03±0.08</td><td>4.02±0.07</td><td></td></tr><tr><td>25</td><td>4.09±0.08</td><td>4.01±0.08</td><td>4.01±0.08</td><td>4.01±0.08</td><td>4.00±0.09</td><td>4.02±0.08</td><td>4.04±0.08</td><td>3.96±0.07</td><td>3.90±0.08</td><td>3.91±0.08</td><td>3.89±0.08</td><td>3.92±0.07</td><td>4.24±0.09</td><td>4.18±0.09</td><td>4.10±0.09</td><td>4.06±0.08</td><td>4.03±0.08</td><td>4.01±0.08</td><td></td></tr><tr><td>50</td><td>4.06±0.08</td><td>4.00±0.07</td><td>4.00±0.07</td><td>3.98±0.07</td><td>4.01±0.09</td><td>3.99±0.08</td><td>4.04±0.08</td><td>3.93±0.07</td><td>3.86±0.09</td><td>3.83±0.08</td><td>3.81±0.08</td><td>3.81±0.10</td><td>4.24±0.10</td><td>4.24±0.10</td><td>4.18±0.09</td><td>4.11±0.09</td><td>4.06±0.09</td><td>4.03±0.08</td><td></td></tr><tr><td 
rowspan="4">3</td><td>5</td><td>4.14±0.09</td><td>4.08±0.08</td><td>4.11±0.06</td><td>4.09±0.11</td><td>4.11±0.09</td><td>4.09±0.09</td><td>4.10±0.09</td><td>4.10±0.07</td><td>4.02±0.08</td><td>4.04±0.08</td><td>4.15±0.08</td><td>4.04±0.09</td><td>4.22±0.09</td><td>4.10±0.08</td><td>4.07±0.09</td><td>3.92±0.07</td><td>3.90±0.06</td><td>3.86±0.05</td><td></td></tr><tr><td>10</td><td>4.02±0.08</td><td>4.02±0.08</td><td>4.00±0.07</td><td>4.02±0.07</td><td>4.02±0.07</td><td>3.99±0.07</td><td>4.01±0.08</td><td>3.95±0.07</td><td>3.92±0.07</td><td>3.92±0.08</td><td>3.90±0.07</td><td>3.90±0.08</td><td>4.20±0.09</td><td>4.10±0.08</td><td>3.98±0.08</td><td>3.99±0.07</td><td>4.05±0.08</td><td>4.03±0.08</td><td></td></tr><tr><td>25</td><td>3.96±0.08</td><td>3.93±0.07</td><td>3.87±0.07</td><td>3.87±0.07</td><td>3.87±0.07</td><td>3.83±0.07</td><td>3.88±0.08</td><td>3.84±0.08</td><td>3.76±0.07</td><td>3.67±0.06</td><td>3.75±0.10</td><td>3.71±0.09</td><td>4.23±0.10</td><td>4.19±0.08</td><td>4.11±0.09</td><td>4.06±0.08</td><td>4.03±0.08</td><td>4.02±0.08</td><td></td></tr><tr><td>50</td><td>3.89±0.08</td><td>3.88±0.07</td><td>3.85±0.06</td><td>3.80±0.09</td><td>3.82±0.07</td><td>3.80±0.08</td><td>3.86±0.08</td><td>3.77±0.09</td><td>3.62±0.06</td><td>3.61±0.08</td><td>3.59±0.09</td><td>3.60±0.07</td><td>4.24±0.10</td><td>4.24±0.10</td><td>4.12±0.09</td><td>4.07±0.08</td><td>4.04±0.08</td><td>4.03±0.08</td><td></td></tr><tr><td 
rowspan="4">4</td><td>5</td><td>4.14±0.10</td><td>4.10±0.08</td><td>4.08±0.08</td><td>4.11±0.09</td><td>4.07±0.05</td><td>4.09±0.09</td><td>12.00±3.26</td><td>4.04±0.07</td><td>4.06±0.09</td><td>4.07±0.07</td><td>4.09±0.08</td><td>4.07±0.08</td><td>4.22±0.09</td><td>4.10±0.08</td><td>3.97±0.07</td><td>3.93±0.07</td><td>3.88±0.07</td><td>3.85±0.07</td><td></td></tr><tr><td>10</td><td>4.01±0.08</td><td>3.98±0.07</td><td>3.99±0.07</td><td>3.99±0.07</td><td>4.05±0.07</td><td>4.01±0.06</td><td>4.03±0.09</td><td>3.99±0.06</td><td>3.94±0.06</td><td>3.94±0.07</td><td>3.92±0.07</td><td>3.93±0.08</td><td>4.20±0.09</td><td>4.12±0.08</td><td>4.08±0.10</td><td>3.94±0.07</td><td>4.06±0.08</td><td>4.01±0.08</td><td></td></tr><tr><td>25</td><td>3.93±0.09</td><td>3.91±0.08</td><td>3.87±0.07</td><td>3.78±0.06</td><td>3.84±0.07</td><td>3.82±0.07</td><td>3.94±0.07</td><td>3.85±0.08</td><td>3.76±0.08</td><td>3.75±0.08</td><td>3.77±0.10</td><td>3.78±0.09</td><td>4.24±0.09</td><td>4.18±0.09</td><td>4.11±0.09</td><td>4.06±0.08</td><td>4.04±0.08</td><td>4.03±0.08</td><td></td></tr><tr><td>50</td><td>3.92±0.08</td><td>3.96±0.07</td><td>3.78±0.11</td><td>3.82±0.07</td><td>3.74±0.10</td><td>3.70±0.07</td><td>3.90±0.08</td><td>3.82±0.06</td><td>3.71±0.09</td><td>3.73±0.09</td><td>3.75±0.08</td><td>3.77±0.08</td><td>4.24±0.09</td><td>4.20±0.10</td><td>4.12±0.09</td><td>4.06±0.08</td><td>4.04±0.08</td><td>4.03±0.08</td><td></td></tr></table>
|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
|
| 367 |
+

|
| 368 |
+
Figure 11: Representation of two hidden layers (with two units per layer) for auNN (a), DGP (b), and DGP-add (c).
|
| 369 |
+
|
| 370 |
+
# D COMPUTATIONAL COST SUMMARY
|
| 371 |
+
|
| 372 |
+
Table 15 shows the training computational complexity for the methods compared in this paper. Moreover, in order to evaluate the computational cost in practice, the table also shows the actual running time for the experiment of Section 3.1. BNN is the fastest algorithm, since it utilizes a factorized Gaussian for the approximate posterior. Although fBNN has the same theoretical complexity, the Spectral Stein Gradient Estimator (Shi et al., 2018) is used to compute the KL divergence gradients. Moreover, a GP prior is specified at function space, for which a GP must be trained as a preliminary step. DGP and auNN have the same theoretical complexity. In practice, auNN is typically faster because it requires fewer inducing points, recall Section 3.3 and Table 3. The running time in Table 15 is very similar for both because the same number of inducing points $(M = 10)$ is used in this simple experiment.
|
| 373 |
+
|
| 374 |
+

|
| 375 |
+
Figure 12: Test NLL on Power dataset for different values of $D$ and $M$ (the lower the better).
|
| 376 |
+
|
| 377 |
+

|
| 378 |
+
Figure 13: Test RMSE on Power dataset for different values of $D$ and $M$ (the lower the better).
|
| 379 |
+
|
| 380 |
+
Table 13: Standard error for the results in Table 2. Three random train-test splits are considered.
|
| 381 |
+
|
| 382 |
+
<table><tr><td rowspan="2" colspan="2"></td><td rowspan="2">N</td><td rowspan="2">D</td><td colspan="6">auNN</td><td colspan="3">DGP</td></tr><tr><td>RBF-2</td><td>RBF-3</td><td>RBF-4</td><td>TRI-2</td><td>TRI-3</td><td>TRI-4</td><td>DGP-2</td><td>DGP-3</td><td>DGP-4</td></tr><tr><td rowspan="2">Brier</td><td>HIGGS</td><td>11M</td><td>28</td><td>0.0001</td><td>0.0007</td><td>0.0008</td><td>0.0003</td><td>0.0005</td><td>0.0009</td><td>0.0018</td><td>0.0016</td><td>0.0006</td></tr><tr><td>SUSY</td><td>5M</td><td>18</td><td>0.0005</td><td>0.0005</td><td>0.0006</td><td>0.0005</td><td>0.0005</td><td>0.0005</td><td>0.0011</td><td>0.0014</td><td>0.0021</td></tr><tr><td rowspan="2">ECE</td><td>HIGGS</td><td>11M</td><td>28</td><td>0.0015</td><td>0.0020</td><td>0.0022</td><td>0.0010</td><td>0.0035</td><td>0.0019</td><td>0.0006</td><td>0.0004</td><td>0.0008</td></tr><tr><td>SUSY</td><td>5M</td><td>18</td><td>0.0012</td><td>0.0011</td><td>0.0014</td><td>0.0018</td><td>0.0012</td><td>0.0014</td><td>0.0005</td><td>0.0006</td><td>0.0008</td></tr></table>
|
| 383 |
+
|
| 384 |
+
Table 14: Standard error for the results in Table 3. Fifty independent runs are considered.
|
| 385 |
+
|
| 386 |
+
<table><tr><td rowspan="2"></td><td colspan="6">auNN</td><td colspan="3">DGP</td></tr><tr><td>RBF-2</td><td>RBF-3</td><td>RBF-4</td><td>TRI-2</td><td>TRI-3</td><td>TRI-4</td><td>DGP-2</td><td>DGP-3</td><td>DGP-4</td></tr><tr><td>HIGGS</td><td>0.0258</td><td>0.0325</td><td>0.0371</td><td>0.0188</td><td>0.0318</td><td>0.0378</td><td>0.0248</td><td>0.0266</td><td>0.0269</td></tr><tr><td>SUSY</td><td>0.0215</td><td>0.0274</td><td>0.0369</td><td>0.0202</td><td>0.0258</td><td>0.0350</td><td>0.0108</td><td>0.0126</td><td>0.0144</td></tr></table>
|
| 387 |
+
|
| 388 |
+

|
| 389 |
+
Figure 14: DGP and GP trained on the dataset of Section 3.1. The experimental details are analogous to those in Section 3.1, see Appendix B. Whereas DGP underestimates the uncertainty for in-between data, a simpler GP does provide increased uncertainty in the gap.
|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
|
| 393 |
+
Table 15: Training computational cost for the models compared in this paper. The running time (in seconds) corresponds to the mean and one standard error over 10 independent runs of the experiment in Section 3.1. More details in Appendix D.
|
| 394 |
+
|
| 395 |
+
<table><tr><td></td><td>BNN</td><td>fBNN</td><td>DGP</td><td>auNN</td></tr><tr><td>Running time (s)</td><td>15.21 ± 0.78</td><td>51.92 ± 1.07</td><td>22.37 ± 0.97</td><td>21.16 ± 0.89</td></tr><tr><td>Complexity</td><td>$\mathcal{O}(N\sum_{i} D_i D_{i+1})$</td><td>$\mathcal{O}(N\sum_{i} D_i D_{i+1})$</td><td>$\mathcal{O}(N M^2 \sum_{i} D_i)$</td><td>$\mathcal{O}(N M^2 \sum_{i} D_i)$</td></tr></table>
|
activationleveluncertaintyindeepneuralnetworks/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4a0131779f9ceb56989072d29ef21f1eaa6e7f1cceab6773b4256297fdaeef3a
|
| 3 |
+
size 1366299
|
activationleveluncertaintyindeepneuralnetworks/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3994e65536902931f39d952963cd73b0dd748a595582d6fb17f5607d35785889
|
| 3 |
+
size 684227
|
activecontrastivelearningofaudiovisualvideorepresentations/5bb5cf6c-3156-4aaf-937c-32d7062f861e_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1efe2147a39f9b4b14cde8b8711d6e0f3a7b4ef455ee8a6dec83495c7c5d4141
|
| 3 |
+
size 110859
|
activecontrastivelearningofaudiovisualvideorepresentations/5bb5cf6c-3156-4aaf-937c-32d7062f861e_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ac7b8399e82ec6de3ff9859088107130c41890d5644970d6380bc052057250e7
|
| 3 |
+
size 137677
|