SlowGuess committed on
Commit
631de9e
·
verified ·
1 Parent(s): 97727e5

Add Batch bf1343f2-16be-4ddc-b114-d2c895ee2224

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_content_list.json +3 -0
  2. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_model.json +3 -0
  3. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_origin.pdf +3 -0
  4. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/full.md +1062 -0
  5. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/images.zip +3 -0
  6. acceleratedsinglecallmethodsforconstrainedminmaxoptimization/layout.json +3 -0
  7. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_content_list.json +3 -0
  8. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_model.json +3 -0
  9. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_origin.pdf +3 -0
  10. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/full.md +335 -0
  11. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/images.zip +3 -0
  12. acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/layout.json +3 -0
  13. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_content_list.json +3 -0
  14. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_model.json +3 -0
  15. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_origin.pdf +3 -0
  16. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/full.md +594 -0
  17. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/images.zip +3 -0
  18. acceleratinghamiltonianmontecarloviachebyshevintegrationtime/layout.json +3 -0
  19. accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_content_list.json +3 -0
  20. accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_model.json +3 -0
  21. accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_origin.pdf +3 -0
  22. accuratebayesianmetalearningbyaccuratetaskposteriorinference/full.md +0 -0
  23. accuratebayesianmetalearningbyaccuratetaskposteriorinference/images.zip +3 -0
  24. accuratebayesianmetalearningbyaccuratetaskposteriorinference/layout.json +3 -0
  25. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_content_list.json +3 -0
  26. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_model.json +3 -0
  27. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_origin.pdf +3 -0
  28. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/full.md +305 -0
  29. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/images.zip +3 -0
  30. accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/layout.json +3 -0
  31. achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_content_list.json +3 -0
  32. achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_model.json +3 -0
  33. achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_origin.pdf +3 -0
  34. achievetheminimumwidthofneuralnetworksforuniversalapproximation/full.md +472 -0
  35. achievetheminimumwidthofneuralnetworksforuniversalapproximation/images.zip +3 -0
  36. achievetheminimumwidthofneuralnetworksforuniversalapproximation/layout.json +3 -0
  37. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_content_list.json +3 -0
  38. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_model.json +3 -0
  39. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_origin.pdf +3 -0
  40. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/full.md +726 -0
  41. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/images.zip +3 -0
  42. achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/layout.json +3 -0
  43. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_content_list.json +3 -0
  44. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_model.json +3 -0
  45. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_origin.pdf +3 -0
  46. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/full.md +0 -0
  47. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/images.zip +3 -0
  48. achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/layout.json +3 -0
  49. acriticallookattheevaluationofgnnsunderheterophilyarewereallymakingprogress/3e8811ba-7259-4c07-bd30-6f066f89108e_content_list.json +3 -0
  50. acriticallookattheevaluationofgnnsunderheterophilyarewereallymakingprogress/3e8811ba-7259-4c07-bd30-6f066f89108e_model.json +3 -0
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04464d2266a1f53b9c760165d5f8fbc05b0dc250a16f603b941705f4d3f5f7c7
3
+ size 186021
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ca14deea2464c6c219e8b3c56a89e3106e4711959bf257da0e2c0d420a630e3
3
+ size 222231
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/5525eba0-a0d6-4274-8898-cde394885d8a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0542a67e172a81fe917fb12efcd516b562d56aaeae617ce1ad30cafafd948e8b
3
+ size 419364
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/full.md ADDED
@@ -0,0 +1,1062 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACCELERATED SINGLE-CALL METHODS FOR CONSTRAINED MIN-MAX OPTIMIZATION
2
+
3
+ Yang Cai
4
+
5
+ Yale University
6
+
7
+ yang.cai@yale.edu
8
+
9
+ Weiqiang Zheng
10
+
11
+ Yale University
12
+
13
+ weiqiang.zheng@yale.edu
14
+
15
+ # ABSTRACT
16
+
17
+ We study first-order methods for constrained min-max optimization. Existing methods either require two gradient calls or two projections in each iteration, which may be costly in some applications. In this paper, we first show that a variant of the Optimistic Gradient (OG) method, a single-call single-projection algorithm, has $O\left(\frac{1}{\sqrt{T}}\right)$ best-iterate convergence rate for inclusion problems with operators that satisfy the weak Minty variation inequality (MVI). Our second result is the first single-call single-projection algorithm - the Accelerated Reflected Gradient (ARG) method that achieves the optimal $O\left(\frac{1}{T}\right)$ last-iterate convergence rate for inclusion problems that satisfy negative comonotonicity. Both the weak MVI and negative comonotonicity are well-studied assumptions and capture a rich set of non-convex non-concave min-max optimization problems. Finally, we show that the Reflected Gradient (RG) method, another single-call single-projection algorithm, has $O\left(\frac{1}{\sqrt{T}}\right)$ last-iterate convergence rate for constrained convex-concave min-max optimization, answering an open problem of (Hsieh et al., 2019). Our convergence rates hold for standard measures such as the tangent residual and the natural residual.
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Various Machine Learning applications, from the generative adversarial networks (GANs) (e.g., (Goodfellow et al., 2014; Arjovsky et al., 2017)), adversarial examples (e.g., (Madry et al., 2017)), robust optimization (e.g., (Ben-Tal et al., 2009)), to reinforcement learning (e.g., (Du et al., 2017; Dai et al., 2018)), can be captured by constrained min-max optimization. Unlike the well-behaved convex-concave setting, these modern ML applications often require solving non-convex non-concave min-max optimization problems in high dimensional spaces.
22
+
23
+ Unfortunately, the general non-convex non-concave setting is intractable even for computing a local solution (Hirsch et al., 1989; Papadimitriou, 1994; Daskalakis et al., 2021). Motivated by the intractability, researchers turn their attention to non-convex non-concave settings with structure. Significant progress has been made for several interesting structured non-convex non-concave settings, such as the ones that satisfy the weak Minty variation inequality (MVI) (Definition 2) (Diakonikolas et al., 2021; Pethick et al., 2022) and the ones that satisfy the more strict negatively comonotone condition (Definition 3) (Lee & Kim, 2021a; Cai et al., 2022a). These algorithms are variations of the celebrated extragradient (EG) method (Korpelevich, 1976), an iterative first-order method. Similar to the extragradient method, these algorithms all require two oracle calls per iteration, which may be costly in practice. We investigate the following important question in this paper:
24
+
25
+ Can we design efficient single-call first-order methods for
26
+
27
+ structured non-convex non-concave min-max optimization? $(^{*})$
28
+
29
+ We provide an affirmative answer to the question. We first show that a single-call method known as the Optimistic Gradient (OG) method (Hsieh et al., 2019) is applicable to all non-
30
+
31
+ convex non-concave settings that satisfy the weak MVI. We then provide the Accelerated Reflected Gradient (ARG) method that achieves the optimal convergence rate in all non-convex non-concave settings that satisfy the negatively comonotone condition. Single-call methods have been studied in the convex-concave settings (Hsieh et al., 2019) but not for the more general non-convex non-concave settings. See Table 1 for comparisons between our algorithms and other algorithms from the literature.
32
+
33
+ <table><tr><td rowspan="2"></td><td rowspan="2">Algorithm</td><td rowspan="2">1-Call?</td><td rowspan="2">Constraints?</td><td colspan="2">Non-Monotone</td></tr><tr><td>Comonotone</td><td>weak MVI</td></tr><tr><td>Normal</td><td>EG+ (Diakonikolas et al., 2021)</td><td>X</td><td>X</td><td>O(1/√T)</td><td>O(1/√T)</td></tr><tr><td></td><td>CEG+ (Pethick et al., 2022)</td><td>X</td><td>✓</td><td>O(1/√T)</td><td>O(1/√T)</td></tr><tr><td></td><td>OGDA (Böhm, 2022; Bot et al., 2022)</td><td>✓</td><td>X</td><td>O(1/√T)</td><td>O(1/√T)</td></tr><tr><td></td><td>OG[this paper]</td><td>✓</td><td>✓</td><td>O(1/√T)</td><td>O(1/√T)</td></tr><tr><td rowspan="3">Accelerated</td><td>FEG (Lee &amp; Kim, 2021b)</td><td>X</td><td>X</td><td>O(1/T)</td><td></td></tr><tr><td>AS (Cai et al., 2022a)</td><td>X</td><td>✓</td><td>O(1/T)</td><td></td></tr><tr><td>ARG [This paper]</td><td>✓</td><td>✓</td><td>O(1/T)</td><td></td></tr></table>
34
+
35
+ Table 1: Existing results for min-max optimization problem with non-monotone operators. A $\checkmark$ in "Constraints?" means the algorithm works in the constrained setting. The convergence rate is in terms of the operator norm (in the unconstrained setting) and the residual (in the constrained setting).
36
+
37
+ # 1.1 OUR CONTRIBUTIONS
38
+
39
+ Throughout the paper, we adopt the more general and abstract framework of inclusion problems, which includes constrained min-max optimization as a special case. More specifically, we consider the following problem.
40
+
41
+ Inclusion Problem. Given $E = F + A$ where $F: \mathbb{R}^n \to \mathbb{R}^n$ is a single-valued (possibly non-monotone) operator and $A: \mathbb{R}^n \rightrightarrows \mathbb{R}^n$ is a set-valued maximally monotone operator, the inclusion problem is defined as follows
42
+
43
+ $$
44
+ \text{find } z^* \in \mathcal{Z} \text{ such that } \mathbf{0} \in E\left(z^*\right) = F\left(z^*\right) + A\left(z^*\right). \tag{IP}
45
+ $$
46
+
47
+ As shown in the following example, we can interpret a min-max optimization problem as an inclusion problem.
48
+
49
+ Example 1 (Min-Max Optimization). The following structured min-max optimization problem captures a wide range of applications in machine learning such as GANs, adversarial examples, robust optimization, and reinforcement learning:
50
+
51
+ $$
52
+ \min _ {x \in \mathbb {R} ^ {n _ {x}}} \max _ {y \in \mathbb {R} ^ {n _ {y}}} f (x, y) + g (x) - h (y), \tag {1}
53
+ $$
54
+
55
+ where $f(\cdot, \cdot)$ is possibly non-convex in $x$ and non-concave in $y$ . Regularized and constrained min-max problems are covered by appropriate choices of lower semi-continuous and convex functions $g$ and $h$ . Examples include the $\ell_1$ -norm, the $\ell_2$ -norm, and the indicator function of a closed convex feasible set. Let $z = (x, y)$ , if we define $F(z) = (\partial_x f(x, y), -\partial_y f(x, y))$ and $A(z) = (\partial g(x), \partial h(y))$ , where $A$ is maximally monotone, then the first-order optimality condition of (1) has the form of an inclusion problem.
56
+
57
+ (Daskalakis et al., 2021) shows that without any assumption on the operator $E = F + A$ , the problem is intractable. The most well understood setting is when $E$ is monotone, i.e., $\langle u - v, z - z' \rangle \geq 0$ for all $z, z'$ and $u \in E(z)$ , $v \in E(z')$ , which captures convex-concave min-max optimization. Motivated by non-convex non-concave min-max optimization, we consider the two most widely studied families of non-monotone operators: (i) negatively
58
+
59
+ comonotone operators and (ii) operators that satisfy the less restrictive weak MVI. See Section 2 for more detailed discussion on their relationship. Here are the main contributions of this paper.
60
+
61
+ Contribution 1: We provide an extension of the Optimistic Gradient (OG) method for inclusion problems when the operator $E = F + A$ satisfies the weak MVI. More specifically, we prove that OG has a $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate (Theorem 1) matching the state of the art algorithms (Diakonikolas et al., 2021; Pethick et al., 2022). Importantly, our algorithm only requires a single oracle call to $F$ and a single call to the resolvent of $A$ .<sup>a</sup>
62
+
63
+ ${}^{a}$ The resolvent of $A$ is defined as ${\left( I + A\right) }^{-1}$ . When $A$ is the subdifferential of the indicator function of a closed convex set,the resolvent operator is exactly the Euclidean projection. Hence our algorithm performs a single projection in the constrained case.
64
+
65
+ Next, we provide an accelerated single-call method when the operator satisfies the stronger negatively comonotone condition.
66
+
67
+ Contribution 2: We design an accelerated version of the Reflected Gradient (RG) (Chambolle & Pock, 2011; Malitsky, 2015; Cui & Shanbhag, 2016; Hsieh et al., 2019) method that we call the Accelerated Reflected Gradient (ARG) method, which has the optimal $O\left(\frac{1}{T}\right)$ convergence rate for inclusion problems whose operators $E = F + A$ are negatively comonotone (Theorem 2). Note that $O\left(\frac{1}{T}\right)$ is the optimal convergence rate for any first-order methods even for monotone inclusion problems (Diakonikolas, 2020; Yoon & Ryu, 2021). Importantly, ARG only requires a single oracle call to $F$ and a single call to the resolvent of $A$ .
68
+
69
+ Finally, we resolve an open question from (Hsieh et al., 2019).
70
+
71
+ Contribution 3: We show that the Reflected Gradient (RG) method has a last-iterate convergence rate of $O\left(\frac{1}{\sqrt{T}}\right)$ for constrained convex-concave min-max optimization (Theorem 3). Hsieh et al. (2019) show that the RG algorithm asymptotically converges but fails to obtain a concrete rate. We strengthen their result to obtain a tight finite convergence rate for RG.
72
+
73
+ We also provide illustrative numerical experiments in Appendix E.
74
+
75
+ # 1.2 RELATED WORKS
76
+
77
+ We provide a brief discussion of the most relevant and recent results on nonconvex-nonconcave min-max optimization here and defer the discussion on related results in the convex-concave setting to Appendix A. We also refer readers to (Facchinei & Pang, 2003; Bauschke & Combettes, 2011; Ryu & Yin, 2022) and references therein for a comprehensive literature review on inclusion problems and related variational inequality problems.
78
+
79
+ Structured Nonconvex-Nonconcave Min-Max Optimization. Since in general nonconvex-nonconcave min-max optimization problems are intractable, recent works study problems under additional assumptions. The Minty variational inequality (MVI) assumption (also called coherence or variational stability), which covers all quasiconvex-concave and starconvex-concave problems, is well-studied in e.g., (Dang & Lan, 2015; Zhou et al., 2017; Liu et al., 2019; Malitsky, 2020; Song et al., 2020; Liu et al., 2021). Extragradient-type algorithms have $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate for problems that satisfies MVI (Dang & Lan, 2015).
80
+
81
+ Diakonikolas et al. (2021) proposes a weaker assumption called weak MVI, which includes both MVI and negative comonotonicity (Bauschke et al., 2021) as special cases. Under the weak MVI, the $\mathrm{EG+}$ (Diakonikolas et al., 2021) and $\mathrm{OGDA+}$ (Böhm, 2022) algorithms have $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate in the unconstrained setting. Recently, Pethick et al. (2022) gen
82
+
83
+ eralizes $\mathrm{EG + }$ to $\mathrm{CEG + }$ algorithm, achieving the same convergence rate in the general (constrained) setting. To the best of our knowledge, the OG algorithm is the only single-call single-resolvent algorithm with $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate when we only assume weak MVI (Theorem 1).
84
+
85
+ The result for accelerated algorithms in the nonconvex-nonconcave setting is sparser. For negatively comonotone operators, optimal $O\left(\frac{1}{T}\right)$ convergence rate is achieved by variants of the EG algorithm in the unconstrained setting (Lee & Kim, 2021a) and in the constrained setting (Cai et al., 2022a). To the best of our knowledge, the ARG algorithm is the first efficient single-call single-resolvent method that achieves the accelerated and optimal $O\left(\frac{1}{T}\right)$ convergence rate in the constrained nonconvex-nonconcave setting (Theorem 2). We summarize previous results and our results in Table 1. Our analysis of ARG is inspired by (Cai et al., 2022a) and uses a similar potential function argument.
86
+
87
+ # 2 PRELIMINARIES
88
+
89
+ Basic Notations. Throughout the paper, we focus on the Euclidean space $\mathbb{R}^n$ equipped with inner product $\langle \cdot ,\cdot \rangle$ . We denote the standard $\ell_2$ -norm by $\| \cdot \|$ . For any closed and convex set $\mathcal{Z}\subseteq \mathbb{R}^n,\Pi_{\mathcal{Z}}[\cdot ]:\mathbb{R}^n\to \mathcal{Z}$ denotes the Euclidean projection onto set $\mathcal{Z}$ such that for any $z\in \mathbb{R}^n,\Pi_{\mathcal{Z}}[z] = \mathrm{argmin}_{z'\in \mathcal{Z}}\| z - z'\|$ . We denote $\mathcal{B}(z,r)$ the $\ell_2$ -ball centered at $z$ with radius $r$ .
90
+
91
+ Normal Cone. We denote $N_{\mathcal{Z}}: \mathcal{Z} \to \mathbb{R}^n$ to be the normal cone operator such that for $z \in \mathcal{Z}$ , $N_{\mathcal{Z}}(z) = \{a : \langle a, z' - z \rangle \leq 0, \forall z' \in \mathcal{Z}\}$ . Define the indicator function
92
+
93
+ $$
94
+ \mathbb{I}_{\mathcal{Z}}(z) = \left\{ \begin{array}{ll} 0 & \text{if } z \in \mathcal{Z}, \\ +\infty & \text{otherwise}. \end{array} \right.
95
+ $$
96
+
97
+ It is not hard to see that the subdifferential operator $\partial \mathbb{I}_{\mathcal{Z}} = N_{\mathcal{Z}}$ . A useful fact is that if $z = \Pi_{\mathcal{Z}}[z']$ , then $\lambda(z' - z) \in N_{\mathcal{Z}}(z)$ for any $\lambda \geq 0$ .
98
+
99
+ Monotone Operator. We recall some standard definitions and results on monotone operators here and refer the readers to (Bauschke & Combettes, 2011; Ryu & Boyd, 2016; Ryu & Yin, 2022) for more detailed introduction. A set-valued operator $A: \mathbb{R}^n \Rightarrow \mathbb{R}^n$ maps each point $z \in \mathbb{R}^n$ to a subset $A(z) \subseteq \mathbb{R}^n$ . We denote the graph of $A$ as $\operatorname{Gra}(A) := \{(z,u): u \in A(z)\}$ and the zeros of $A$ as $\operatorname{Zer}(A) = \{z : \mathbf{0} \in A(z)\}$ . The inverse operator of $A$ is denoted as $A^{-1}$ whose graph is $\operatorname{Gra}(A^{-1}) = \{(u,z): (z,u) \in \operatorname{Gra}(A)\}$ . For two operators $A$ and $B$ , we denote $A + B$ to be the operator with graph $\operatorname{Gra}(A + B) = \{(z,u_A + u_B): (z,u_A) \in \operatorname{Gra}(A), (z,u_B) \in \operatorname{Gra}(B)\}$ . We denote the identity operator as $I: \mathbb{R}^n \to \mathbb{R}^n$ . We say operator $A$ is single valued if $|A(z)| \leq 1$ for all $z \in \mathbb{R}^n$ . Single-valued operator $A$ is L-Lipschitz if $\|A(z) - A(z')\| \leq L \cdot \|z - z'\|$ , $\forall z, z' \in \mathbb{R}^n$ . Moreover, we say $A$ is non-expansive if it is 1-Lipschitz.
100
+
101
+ Definition 1 ((Maximally) monotonicity). An operator $A:\mathbb{R}^n\Rightarrow \mathbb{R}^n$ is monotone if
102
+
103
+ $$
104
+ \left\langle u - u ^ {\prime}, z - z ^ {\prime} \right\rangle \geq 0, \quad \forall (z, u), \left(z ^ {\prime}, u ^ {\prime}\right) \in \operatorname {G r a} (A).
105
+ $$
106
+
107
+ Moreover, $A$ is maximally monotone if $A$ is monotone and $\operatorname{Gra}(A)$ is not properly contained in the graph of any other monotone operators.
108
+
109
+ When $g: \mathbb{R}^n \to \mathbb{R}^n$ is closed, convex, and proper, then its subdifferential operator $\partial g$ is maximally monotone. As an example, the normal cone operator $N_{\mathcal{Z}} = \partial \mathbb{I}_{\mathcal{Z}}$ is maximally monotone.
110
+
111
+ We denote resolvent of $A$ as $J_A = (I + A)^{-1}$ . Some useful properties of the resolvent are summarized in the following proposition.
112
+
113
+ Proposition 1. If $A$ is maximally monotone, then $J_{A}$ satisfies the following.
114
+
115
+ 1. The domain of $J_{A}$ is $\mathbb{R}^n$ . $J_{A}$ is non-expansive and single-valued on $\mathbb{R}^n$ .
116
+
117
+ 2. If $z = J_A(z')$ , then $z' - z \in A(z)$ . If $c \in A(z)$ , then $z = J_A(z + c)$ .
118
+ 3. When $A = N_{\mathcal{Z}}$ is the normal cone operator for some closed convex set $\mathcal{Z}$ , then $J_{\eta A} = \Pi_{\mathcal{Z}}$ is the Euclidean projection onto $\mathcal{Z}$ for all $\eta > 0$ .
119
+
120
+ # Non-Monotone Operator.
121
+
122
+ Definition 2 (Weak MVI (Diakonikolas et al., 2021; Pethick et al., 2022)). An operator $A: \mathbb{R}^n \Rightarrow \mathbb{R}^n$ satisfies weak MVI if for some $z^* \in \mathrm{Zer}(A)$, there exists $\rho \leq 0$ such that
123
+
124
+ $$
125
+ \langle u, z - z ^ {*} \rangle \geq \rho \| u \| ^ {2}, \quad \forall (z, u) \in \operatorname {G r a} (A).
126
+ $$
127
+
128
+ Definition 3 (Comonotonicity (Bauschke et al., 2021)). An operator $A: \mathbb{R}^n \Rightarrow \mathbb{R}^n$ is $\rho$ -comonotone if
129
+
130
+ $$
131
+ \left\langle u - u ^ {\prime}, z - z ^ {\prime} \right\rangle \geq \rho \| u - u ^ {\prime} \| ^ {2}, \quad \forall (z, u), (z ^ {\prime}, u ^ {\prime}) \in \operatorname {G r a} (A).
132
+ $$
133
+
134
+ When $A$ is $\rho$ -comonotone for $\rho > 0$ , then $A$ is also known as $\rho$ -cocoercive, which is a stronger condition than monotonicity. When $A$ is $\rho$ -comonotone for $\rho < 0$ , then $A$ is non-monotone. Weak MVI with $\rho = 0$ is also known as MVI, coherence, or variational stability. Note that the weak MVI is implied by negative comonotonicity. We refer the readers to (Lee & Kim, 2021a, Example 1), (Diakonikolas et al., 2021, Section 2.2) and (Pethick et al., 2022, Section 5) for examples of min-max optimization problems that satisfy the two conditions.
135
+
136
+ # 2.1 PROBLEM FORMULATION
137
+
138
+ Inclusion Problem. Given $E = F + A$ where $F: \mathbb{R}^n \to \mathbb{R}^n$ is a single-valued (possibly non-monotone) operator and $A: \mathbb{R}^n \Rightarrow \mathbb{R}^n$ is a set-valued maximally monotone operator, the inclusion problem is defined as follows
139
+
140
+ $$
141
+ \text{find } z^* \in \mathcal{Z} \text{ such that } \mathbf{0} \in E\left(z^*\right) = F\left(z^*\right) + A\left(z^*\right). \tag{IP}
142
+ $$
143
+
144
+ We say $z$ is an $\epsilon$-approximate solution to an inclusion problem (IP) if $\mathbf{0} \in F(z) + A(z) + \mathcal{B}(\mathbf{0},\epsilon)$. Throughout the paper, we study IP problems under the following assumption.
145
+
146
+ Assumption 1. In the setup of IP,
147
+
148
+ 1. there exists $z^{*}\in \mathsf{Zer}(E)$ , i.e., $\mathbf{0}\in F(z^{*}) + A(z^{*})$
149
+ 2. $F$ is $L$ -Lipschitz.
150
+ 3. $A$ is maximally monotone.
151
+
152
+ When $F$ is monotone, we refer to the corresponding IP problem as a monotone inclusion problem, which covers convex-concave min-max optimization. In the more general non-monotone setting, we would study problems that satisfy negative comonotonicity or weak MVI.
153
+
154
+ Assumption 2 (Comonotonicity). In the setup of $IP$ , $E = F + A$ is $\rho$ -comonotone, i.e.,
155
+
156
+ $$
157
+ \left\langle u - u ^ {\prime}, z - z ^ {\prime} \right\rangle \geq \rho \| u - u ^ {\prime} \| ^ {2}, \quad \forall (z, u), (z ^ {\prime}, u ^ {\prime}) \in \operatorname {G r a} (E).
158
+ $$
159
+
160
+ Assumption 3 (Weak MVI). In the setup of IP, $E = F + A$ satisfies weak MVI with $\rho \leq 0$, i.e., there exists $z^{*} \in \operatorname{Zer}(E)$ such that
161
+
162
+ $$
163
+ \langle u, z - z ^ {*} \rangle \geq \rho \| u \| ^ {2}, \quad \forall (z, u) \in \operatorname {G r a} (E).
164
+ $$
165
+
166
+ An important special case of inclusion problem is the variational inequality problem.
167
+
168
+ Variational Inequality. Let $\mathcal{Z} \subseteq \mathbb{R}^n$ be a closed and convex set and $F: \mathbb{R}^n \to \mathbb{R}^n$ be a single-valued operator. The variational inequality (VI) problem associated with $\mathcal{Z}$ and $F$ is stated as
169
+
170
+ $$
171
+ \text{find } z^* \in \mathcal{Z} \text{ such that } \langle F(z^*), z^* - z \rangle \leq 0, \quad \forall z \in \mathcal{Z}. \tag{VI}
172
+ $$
173
+
174
+ Note that VI is a special case of IP when $A = N_{\mathcal{Z}} = \partial \mathbb{I}_{\mathcal{Z}}$ is the normal cone operator:
175
+
176
+ $$
177
+ \mathbf {0} \in F (z ^ {*}) + N _ {\mathcal {Z}} (z ^ {*}) \Leftrightarrow - F (z ^ {*}) \in N _ {\mathcal {Z}} (z ^ {*}) \Leftrightarrow \langle F (z ^ {*}), z ^ {*} - z \rangle \leq 0, \forall z \in \mathcal {Z}.
178
+ $$
179
+
180
+ The general formulation of VI unifies many problems such as convex optimization, min-max optimization, and computing Nash equilibria in multi-player concave games, and has been extensively studied since the 1960s (Facchinei & Pang, 2003). Definitions of the convergence measure for VI and the classical algorithms, EG and PEG, are presented in Appendix B.
181
+
182
+ # 2.2 CONVERGENCE MEASURE
183
+
184
+ We focus on a strong convergence measure called the tangent residual, defined as $r_{F,A}^{tan}(z) \coloneqq \min_{c \in A(z)} \| F(z) + c\|$ . It is clear by definition that $r_{F,A}^{tan}(z) \leq \epsilon$ implies $z$ is an $\epsilon$ -approximate solution to the inclusion (IP) problem, and also an $(\epsilon \cdot D)$ approximate strong solution to the corresponding variational inequality (VI) problem when $\mathcal{Z}$ is bounded by $D$ . Moreover, the tangent residual is an upper bound of other notions of residuals in the literature such as the natural residual $r_{F,A}^{nat}$ (Diakonikolas, 2020) or the forward-backward residual $r_{F,A}^{fb}$ (Yoon & Ryu, 2022) as shown in Proposition 2 (see Appendix B.4 for the formal statement and proof). Thus our convergence rates on the tangent residual also hold for the natural residual or the forward-backward residual. Note that in the unconstrained setting where $A = 0$ , these residuals are all equivalent to the operator norm $\| F(z)\|$ .
185
+
186
+ # 3 OPTIMISTIC GRADIENT METHOD FOR WEAK MVI PROBLEMS
187
+
188
+ In this section, we consider an extension of the Optimistic Gradient (OG) algorithm (Daskalakis et al., 2017; Mokhtari et al., 2020a,b; Hsieh et al., 2019; Peng et al., 2020) for inclusion problems: given arbitrary starting point $z_{-\frac{1}{2}} = z_0 \in \mathbb{R}^n$ and step size $\eta > 0$ , the update rule is
189
+
190
+ $$
191
+ z _ {t + \frac {1}{2}} = J _ {\eta A} \left[ z _ {t} - \eta F \left(z _ {t - \frac {1}{2}}\right) \right], \tag {OG}
192
+ $$
193
+
194
+ $$
195
+ z _ {t + 1} = z _ {t + \frac {1}{2}} + \eta F \left(z _ {t - \frac {1}{2}}\right) - \eta F \left(z _ {t + \frac {1}{2}}\right).
196
+ $$
197
+
198
+ For $t \geq 1$ , the update rule can also be written as $z_{t + \frac{3}{2}} = J_{\eta A}\left[z_{t + \frac{1}{2}} - 2\eta F(z_{t + \frac{1}{2}}) + \eta F(z_{t - \frac{1}{2}})\right]$ , which coincides with the forward-reflected-backward algorithm (Malitsky & Tam, 2020). We remark that the update rule of OG is different from the Optimistic Gradient Descent/Ascent (OGDA) algorithm (also known as Past Extra Gradient (PEG) algorithm) (Popov, 1980), which is single-call but requires two projections in each iteration.
199
+
200
+ Previous results for OG only hold in the convex-concave (monotone) setting. The main result in this section is that OG has $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate even for nonconvex-nonconcave min-max optimization problems that satisfy weak MVI, matching the state of the art results achieved by two-call methods (Diakonikolas et al., 2021; Pethick et al., 2022). Remarkably, OG only requires single call to $F$ and single call to the resolvent $J_{\eta A}$ in each iteration. The main result is shown in Theorem 1. The proof relies on a simple yet important observation that $\frac{z_t - z_{t + 1}}{\eta} \in F(z_{t + \frac{1}{2}}) + A(z_{t + \frac{1}{2}})$ .
201
+
202
+ Theorem 1. Assume Assumptions 1 and 3 hold with $\rho \in (-\frac{1}{12\sqrt{3}L},0]$ . Consider the iterates of (OG) with step size $\eta \in (0,\frac{1}{2L})$ satisfying $C = \frac{1}{2} +\frac{2\rho}{\eta} -2\eta^2 L^2 >0$ (existence of such $\eta$ is guaranteed by Fact 1). Then for any $T\geq 1$
203
+
204
+ $$
205
+ \min _ {t \in [ T ]} r _ {F, A} ^ {t a n} \left(z _ {t + \frac {1}{2}}\right) ^ {2} \leq \min _ {t \in [ T ]} \frac {\left\| z _ {t + 1} - z _ {t} \right\| ^ {2}}{\eta^ {2}} \leq \frac {H ^ {2}}{C \eta^ {2}} \cdot \frac {1}{T},
206
+ $$
207
+
208
+ where $H^2 = \| z_1 - z^*\|^2 + \frac{1}{4}\| z_{\frac{1}{2}} - z_0\|^2$ .
209
+
210
+ Proof. From the update rule of (OG), we have the following identity (see also (Hsieh et al., 2019, Appendix B)): for any $p \in \mathcal{Z}$
211
+
212
+ $$
213
+ \begin{array}{l} \left\| z _ {t + 1} - p \right\| ^ {2} = \left\| z _ {t} - p \right\| ^ {2} + \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} \\ + 2 \left\langle z _ {t} - \eta F \left(z _ {t - \frac {1}{2}}\right) - z _ {t + \frac {1}{2}} + \eta F \left(z _ {t + \frac {1}{2}}\right), p - z _ {t + \frac {1}{2}} \right\rangle . \tag {2} \\ \end{array}
214
+ $$
215
+
216
+ Since $z_{t + \frac{1}{2}} = J_{\eta A}[z_t - \eta F(z_{t - \frac{1}{2}})]$ , we have $\frac{z_t - \eta F(z_{t - \frac{1}{2}}) - z_{t + \frac{1}{2}}}{\eta} \in A(z_{t + \frac{1}{2}})$ by Proposition 1. Then
217
+
218
+ $$
219
+ \frac {z _ {t} - z _ {t + 1}}{\eta} = \frac {z _ {t} - \eta F (z _ {t - \frac {1}{2}}) - z _ {t + \frac {1}{2}}}{\eta} + F (z _ {t + \frac {1}{2}}) \in F (z _ {t + \frac {1}{2}}) + A (z _ {t + \frac {1}{2}}).
220
+ $$
221
+
222
+ Set $p = z^{*}$ . By the weak MVI assumption, we have
223
+
224
+ $$
225
+ \begin{array}{l} 2 \Big \langle z _ {t} - \eta F (z _ {t - \frac {1}{2}}) - z _ {t + \frac {1}{2}} + \eta F (z _ {t + \frac {1}{2}}), z ^ {*} - z _ {t + \frac {1}{2}} \Big \rangle = 2 \eta \Big \langle \frac {z _ {t} - z _ {t + 1}}{\eta}, z ^ {*} - z _ {t + \frac {1}{2}} \Big \rangle \\ \leq - \frac {2 \rho}{\eta} \| z _ {t} - z _ {t + 1} \| ^ {2}. \tag {3} \\ \end{array}
226
+ $$
227
+
228
+ Define $c = \frac{1}{2} - 2\eta^2 L^2 > 0$ . We have identity
229
+
230
+ $$
231
+ (1 - 2 c) \eta^ {2} L ^ {2} = 4 \eta^ {4} L ^ {4} = \frac {1}{2} - c - (1 + 2 c) \eta^ {2} L ^ {2}. \tag {4}
232
+ $$
233
+
234
+ Combining Equation (2) and (3) and using $\| a + b\|^2 \leq 2\| a\|^2 + 2\| b\|^2$ , we have
235
+
236
+ $$
237
+ \begin{array}{l} \left\| z _ {t + 1} - z ^ {*} \right\| ^ {2} \\ \leq \| z _ {t} - z ^ {*} \| ^ {2} + \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} + c \| z _ {t} - z _ {t + 1} \| ^ {2} - (c + \frac {2 \rho}{\eta}) \| z _ {t} - z _ {t + 1} \| ^ {2} \\ \leq \left\| z _ {t} - z ^ {*} \right\| ^ {2} + (1 + 2 c) \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - (1 - 2 c) \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} - (c + \frac {2 \rho}{\eta}) \left\| z _ {t} - z _ {t + 1} \right\| ^ {2}. \tag {5} \\ \end{array}
238
+ $$
239
+
240
+ Using the update rule of OG and $L$ -Lipschitzness of $F$ , we have that for any $t \geq 0$
241
+
242
+ $$
243
+ \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} = \left\| \eta F \left(z _ {t - \frac {1}{2}}\right) - \eta F \left(z _ {t + \frac {1}{2}}\right) \right\| ^ {2} \leq \eta^ {2} L ^ {2} \left\| z _ {t + \frac {1}{2}} - z _ {t - \frac {1}{2}} \right\| ^ {2}. \tag {6}
244
+ $$
245
+
246
+ Moreover, using $\| a + b \|^2 \leq 2 \| a \|^2 + 2 \| b \|^2$ and Equation (6), we have that for any $t \geq 1$ ,
247
+
248
+ $$
249
+ \left\| z _ {t + \frac {1}{2}} - z _ {t - \frac {1}{2}} \right\| ^ {2} \leq 2 \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} + 2 \left\| z _ {t} - z _ {t - \frac {1}{2}} \right\| ^ {2} \leq 2 \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} + 2 \eta^ {2} L ^ {2} \left\| z _ {t - \frac {1}{2}} - z _ {t - \frac {3}{2}} \right\| ^ {2}.
250
+ $$
251
+
252
+ which implies
253
+
254
+ $$
255
+ \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} \geq \frac {1}{2} \left\| z _ {t + \frac {1}{2}} - z _ {t - \frac {1}{2}} \right\| ^ {2} - \eta^ {2} L ^ {2} \left\| z _ {t - \frac {1}{2}} - z _ {t - \frac {3}{2}} \right\| ^ {2}. \tag {7}
256
+ $$
257
+
258
+ Combining Equation (4), (5), (6), and (7), we have that for all $t \geq 1$
259
+
260
+ $$
261
+ \begin{array}{l} \left\| z _ {t + 1} - z ^ {*} \right\| ^ {2} \\ \leq \| z _ {t} - z ^ {*} \| ^ {2} + (1 + 2 c) \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - (1 - 2 c) \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} - (c + \frac {2 \rho}{\eta}) \| z _ {t} - z _ {t + 1} \| ^ {2} \\ \leq \left\| z _ {t} - z ^ {*} \right\| ^ {2} + (1 - 2 c) \eta^ {2} L ^ {2} \left\| z _ {t - \frac {1}{2}} - z _ {t - \frac {3}{2}} \right\| ^ {2} - \left(\frac {1}{2} - c - (1 + 2 c) \eta^ {2} L ^ {2}\right) \left\| z _ {t + \frac {1}{2}} - z _ {t - \frac {1}{2}} \right\| ^ {2} \\ - \left(c + \frac {2 \rho}{\eta}\right) \| z _ {t} - z _ {t + 1} \| ^ {2} \\ = \| z _ {t} - z ^ {*} \| ^ {2} + 4 \eta^ {4} L ^ {4} \bigg (\left\| z _ {t - \frac {1}{2}} - z _ {t - \frac {3}{2}} \right\| ^ {2} - \left\| z _ {t + \frac {1}{2}} - z _ {t - \frac {1}{2}} \right\| ^ {2} \bigg) - (c + \frac {2 \rho}{\eta}) \| z _ {t} - z _ {t + 1} \| ^ {2}. \\ \end{array}
262
+ $$
263
+
264
+ Telescoping the above inequality and using $c = \frac{1}{2} - 2\eta^2 L^2$ and $\eta L < \frac{1}{2}$ , we get
265
+
266
+ $$
267
+ \left(\frac {1}{2} + \frac {2 \rho}{\eta} - 2 \eta^ {2} L ^ {2}\right) \sum_ {t = 1} ^ {T} \| z _ {t} - z _ {t + 1} \| ^ {2} \leq \| z _ {1} - z ^ {*} \| ^ {2} + \frac {1}{4} \left\| z _ {\frac {1}{2}} - z _ {- \frac {1}{2}} \right\| ^ {2}.
268
+ $$
269
+
270
+ Note that $z_0$ is the same as $z_{-\frac{1}{2}}$ . This completes the proof.
271
+
272
+ ![](images/dbc4b1f7fee2551e52bc5f8fcc50fd4ed49237df8d5df8e54f6c2833ba2888ac.jpg)
273
+
274
+ Fact 1. For any $L > 0$ and $\rho > -\frac{1}{12\sqrt{3}L}$ , there exists $\eta \in (0, \frac{1}{2L})$ such that $\frac{1}{2} + \frac{2\rho}{\eta} - 2\eta^2 L^2 > 0$ .
275
+
276
+ Proof. Let $\eta = \frac{1}{2\sqrt{3}L}$ , then the desired inequality holds whenever
277
+
278
+ $$
279
+ \rho > - \frac {\eta L \left(1 - 4 \eta^ {2} L ^ {2}\right)}{4} \cdot \frac {1}{L} = - \frac {1}{1 2 \sqrt {3} L}.
280
+ $$
281
+
282
+ ![](images/f862cddf032e64304c841fe1a1889859e17063304c1d85c38d27f5e9b99b24d1.jpg)
283
+
284
+ # 4 ACCELERATED REFLECTED GRADIENT FOR NEGATIVELY COMONOTONE PROBLEMS
285
+
286
+ In this section, we propose a new algorithm called the Accelerated Reflected Gradient (ARG) algorithm. We prove that ARG enjoys accelerated $O\left(\frac{1}{T}\right)$ convergence rate for inclusion problems with comonotone operators (Theorem 2). Note that the lower bound $\Omega\left(\frac{1}{T}\right)$ holds even for the special case of convex-concave min-max optimization (Diakonikolas, 2020; Yoon & Ryu, 2021).
287
+
288
+ Our algorithm is inspired by the Reflected Gradient (RG) algorithm (Chambolle & Pock, 2011; Malitsky, 2015; Cui & Shanbhag, 2016; Hsieh et al., 2019) for monotone variational inequalities. Starting at initial points $z_{-1} = z_0 \in \mathcal{Z}$ , the update rule of RG with step size $\eta > 0$ is as follows: for $t = 0,1,2,\dots$
289
+
290
+ $$
291
+ z _ {t + \frac {1}{2}} = 2 z _ {t} - z _ {t - 1},
292
+ $$
293
+
294
+ $$
295
+ z _ {t + 1} = \Pi_ {\mathcal {Z}} \left[ z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) \right]. \tag {RG}
296
+ $$
297
+
298
+ We propose the following Accelerated Reflected Gradient (ARG) algorithm, which is a single-call single-resolvent first-order method. Given arbitrary initial points $z_0 = z_{\frac{1}{2}} \in \mathbb{R}^n$ and step size $\eta > 0$ , ARG sets $z_1 = J_{\eta A}[z_0 - \eta F(z_0)]$ and updates for $t = 1, 2, \dots$
299
+
300
+ $$
301
+ z _ {t + \frac {1}{2}} = 2 z _ {t} - z _ {t - 1} + \frac {1}{t + 1} (z _ {0} - z _ {t}) - \frac {1}{t} (z _ {0} - z _ {t - 1}),
302
+ $$
303
+
304
+ $$
305
+ z _ {t + 1} = J _ {\eta A} \left[ z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right) \right]. \tag {ARG}
306
+ $$
307
+
308
+ We use the idea from Halpern iteration (Halpern, 1967) to design the accelerated algorithm (ARG). This technique for deriving optimal first-order methods is also called Anchoring and receives intense attention recently (Diakonikolas, 2020; Yoon & Ryu, 2021; Lee & Kim, 2021a; Tran-Dinh & Luo, 2021; Tran-Dinh, 2022; Cai et al., 2022a). We defer detailed discussion on these works to Appendix A. We remark that the state of the art result from (Cai et al., 2022a) is a variant of the EG algorithm that makes two oracle calls per iteration. Thus, to the best of our knowledge, ARG is the first single-call single-resolvent algorithm with optimal convergence rate for general inclusion problems with comonotone operators.
309
+
310
+ Theorem 2. Assume Assumptions 1 and 2 hold for $\rho \in [-\frac{1}{60L}, 0]$ , then the accelerated reflected gradient (ARG) algorithm with constant step size $\eta > 0$ satisfying Inequality (10) has the following convergence rate: for any $T \geq 1$ ,
311
+
312
+ $$
313
+ r _ {F, A} ^ {t a n} (z _ {T}) \leq \frac {\sqrt {6} H}{\eta} \cdot \frac {1}{T},
314
+ $$
315
+
316
+ where $H^2 = \| z_0 - z^*\|^2 + 4\| z_1 - z_0\|^2 \leq \| z_0 - z^*\|^2 + 4r_{F,A}^{tan}(z_0)^2$ .
317
+
318
+ Remark 1. Note that if Assumption 2 is satisfied with respect to some $\rho > 0$ , then it is also satisfied with $\rho = 0$ , so Theorem 2 applies.
319
+
320
+ We provide a proof sketch for Theorem 2 here and the full proof in Appendix C. Our proof is based on a potential function argument similar to the one in (Cai et al., 2022a).
321
+
322
+ Proof Sketch. We apply a potential function argument. We first show the potential function is approximately non-increasing and then prove that it is upper bounded by a term independent of $T$ . As the potential function at step $t$ is also at least $\Omega(t^2) \cdot r_{F,A}^{tan}(z_t)^2$ , we conclude that ARG has an $O\left(\frac{1}{T}\right)$ convergence rate.
323
+
324
+ # 5 LAST-ITERATE CONVERGENCE RATE OF REFLECTED GRADIENT
325
+
326
+ In this section, we show that the Reflected Gradient (RG) algorithm (Chambolle & Pock, 2011; Malitsky, 2015; Cui & Shanbhag, 2016; Hsieh et al., 2019) has a last-iterate convergence rate of $O\left(\frac{1}{\sqrt{T}}\right)$ with respect to tangent residual and gap function (see Definition 4) for solving monotone variational inequalities (Theorem 3).
327
+
328
+ Theorem 3. For a variational inequality problem (VI) associated with a closed convex set $\mathcal{Z}$ and a monotone and $L$ -Lipschitz operator $F$ with a solution $z^{*}$ , the (RG) algorithm with constant step size $\eta \in (0, \frac{1}{(1 + \sqrt{2})L})$ has the following last-iterate convergence rate: for any $T \geq 1$ ,
329
+
330
+ $$
331
+ r _ {F, \mathcal {Z}} ^ {\tan} (z _ {T}) \leq \frac {\lambda H L}{\sqrt {T}}, \quad \mathrm {G A P} _ {\mathcal {Z}, F, D} (z _ {T}) \leq \frac {\lambda D H L}{\sqrt {T}}
332
+ $$
333
+
334
+ where $H^2 = 4\| z_0 - z^*\|^2 + \frac{13}{L^2}\| F(z_0)\|^2$ and $\lambda = \sqrt{\frac{6(1 + 3\eta^2L^2)}{\eta^2L^2(1 + (1 + \sqrt{2})\eta L)}}$ .
335
+
336
+ We remark that the convergence rate of RG is slower than ARG and other optimal first-order algorithms even in the monotone setting. Nevertheless, understanding its last-iterate convergence rate is still interesting: (1) RG is simple and largely used in practice; (2) Last-iterate convergence rates of simple classic algorithms such as EG and RG are mentioned as open problems in (Hsieh et al., 2019). The question is recently resolved for EG (Gorbunov et al., 2022a; Cai et al., 2022b) but remains open for RG; (3) Compared to EG, RG requires only a single call to $F$ and a single projection in each iteration.
337
+
338
+ We provide a proof sketch for Theorem 3 here and the full proof in Appendix D.
339
+
340
+ Proof Sketch. Our analysis is based on a potential function argument and can be summarized in the following three steps. (1) We construct a potential function and show that it is non-increasing between two consecutive iterates; (2) We prove that the (RG) algorithm has a best-iterate convergence rate, i.e., for any $T \geq 1$ , there exists one iterate $t^* \in [T]$ such that our potential function at iterate $t^*$ is small; (3) We combine the above steps to show that the last iterate has the same convergence guarantee as the best iterate and derive the $O\left(\frac{1}{\sqrt{T}}\right)$ last-iterate convergence rate.
341
+
342
+ # 6 CONCLUSION
343
+
344
+ This paper introduces single-call single-resolvent algorithms for non-monotone inclusion problems. We prove that OG has $O\left(\frac{1}{\sqrt{T}}\right)$ convergence rate for problems satisfying weak MVI and design a new algorithm - ARG that has the optimal $O\left(\frac{1}{T}\right)$ convergence rate for problems satisfying negative comonotonicity. Finally, we resolve the problem of last-iterate convergence rate of RG.
345
+
346
+ # ACKNOWLEDGEMENTS
347
+
348
+ Yang Cai is supported by a Sloan Foundation Research Fellowship and the NSF Award CCF-1942583 (CAREER). We thank the anonymous reviewers for their constructive comments.
349
+
350
+ # REFERENCES
351
+
352
+ Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein Generative Adversarial Networks. In Proceedings of the 34th International Conference on Machine Learning, pp. 214-223. PMLR, July 2017. URL https://proceedings.mlr.press/v70/arjovsky17a.html. ISSN: 2640-3498.
353
+ Heinz H. Bauschke and Patrick L. Combettes. Convex Analysis and Monotone Operator Theory in Hilbert Spaces. {CMS} {Books} in {Mathematics}. Springer, 2011. URL https://link.springer.com/book/10.1007/978-1-4419-9467-7.
354
+ Heinz H Bauschke, Walaa M Moursi, and Xianfu Wang. Generalized monotone operators and their averaged resolvents. Mathematical Programming, 189(1):55-74, 2021.
355
+ Aharon Ben-Tal, Laurent El Ghaoui, and Arkadi Nemirovski. Robust Optimization. Princeton University Press, December 2009. ISBN 978-1-4008-3105-0. doi: 10.1515/9781400831050. URL https://www.degruyter.com/document/doi/10.1515/9781400831050/html.
356
+ Axel Böhm. Solving nonconvex-nonconcave min-max problems exhibiting weak minty solutions. arXiv preprint arXiv:2201.12247, 2022.
357
+ Radu Ioan Bot, Erno Robert Csetnek, and Dang-Khoa Nguyen. Fast ogda in continuous and discrete time. arXiv preprint arXiv:2203.10947, 2022.
358
+ Yang Cai, Argyris Oikonomou, and Weiqiang Zheng. Accelerated algorithms for monotone inclusion and constrained nonconvex-nonconcave min-max optimization. arXiv preprint arXiv:2206.05248, 2022a.
359
+ Yang Cai, Argyris Oikonomou, and Weiqiang Zheng. Finite-time last-iterate convergence for learning in multi-player games. In Advances in Neural Information Processing Systems (NeurIPS), 2022b.
360
+ Antonin Chambolle and Thomas Pock. A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging. Journal of Mathematical Imaging and Vision, 40 (1):120-145, May 2011. ISSN 1573-7683. doi: 10.1007/s10851-010-0251-1. URL https://doi.org/10.1007/s10851-010-0251-1.
361
+ Shisheng Cui and Uday V Shanbhag. On the analysis of reflected gradient and splitting methods for monotone stochastic variational inequality problems. In 2016 IEEE 55th Conference on Decision and Control (CDC), 2016.
362
+ Bo Dai, Albert Shaw, Lihong Li, Lin Xiao, Niao He, Zhen Liu, Jianshu Chen, and Le Song. Sbeed: Convergent reinforcement learning with nonlinear function approximation. In International Conference on Machine Learning, pp. 1125-1134. PMLR, 2018.
363
+ Cong D Dang and Guanghui Lan. On the convergence properties of non-euclidean extragradient methods for variational inequalities with generalized monotone operators. Computational Optimization and applications, 60(2):277-310, 2015.
364
+ Constantinos Daskalakis, Andrew Ilyas, Vasilis Syrgkanis, and Haoyang Zeng. Training gans with optimism. arXiv preprint arXiv:1711.00141, 2017.
365
+ Constantinos Daskalakis, Stratis Skoulakis, and Manolis Zampetakis. The complexity of constrained min-max optimization. In Proceedings of the 53rd Annual ACM SIGACT Symposium on Theory of Computing (STOC), 2021.
366
+
367
+ Jelena Diakonikolas. Halpern iteration for near-optimal and parameter-free monotone inclusion and strong solutions to variational inequalities. In Conference on Learning Theory (COLT), 2020.
368
+ Jelena Diakonikolas, Constantinos Daskalakis, and Michael Jordan. Efficient methods for structured nonconvex-nonconcave min-max optimization. International Conference on Artificial Intelligence and Statistics, 2021.
369
+ Simon S. Du, Jianshu Chen, Lihong Li, Lin Xiao, and Dengyong Zhou. Stochastic variance reduction methods for policy evaluation. In International Conference on Machine Learning, pp. 1049-1058. PMLR, 2017.
370
+ Francisco Facchinei and Jong-Shi Pang. Finite-Dimensional Variational Inequalities and Complementarity Problems. Springer, 2003. URL https://link.springer.com/book/10.1007/b97544.
371
+ Tanner Fiez and Lillian J Ratliff. Local convergence analysis of gradient descent ascent with finite timescale separation. In Proceedings of the International Conference on Learning Representation, 2021.
372
+ Noah Golowich, Sarath Pattathil, and Constantinos Daskalakis. Tight last-iterate convergence rates for no-regret learning in multi-player games. Advances in neural information processing systems (NeurIPS), 2020.
373
+ Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. Advances in neural information processing systems, 27, 2014.
374
+ Eduard Gorbunov, Nicolas Loizou, and Gauthier Gidel. Extragradient method: $\mathcal{O}(1 / k)$ last-iterate convergence for monotone variational inequalities and connections with cocoercivity. In International Conference on Artificial Intelligence and Statistics (AISTATS), pp. 366-402. PMLR, 2022a.
375
+ Eduard Gorbunov, Adrien Taylor, and Gauthier Gidel. Last-iterate convergence of optimistic gradient method for monotone variational inequalities. In Advances in Neural Information Processing Systems, 2022b.
376
+ Benjamin Halpern. Fixed points of nonexpanding maps. Bulletin of the American Mathematical Society, 73(6):957-961, 1967.
377
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems (NeurIPS), 30, 2017.
378
+ Michael D Hirsch, Christos H Papadimitriou, and Stephen A Vavasis. Exponential lower bounds for finding Brouwer fix points. Journal of Complexity, 5(4):379-416, December 1989. ISSN 0885-064X. doi: 10.1016/0885-064X(89)90017-4. URL https://www.sciencedirect.com/science/article/pii/0885064X89900174.
379
+ Yu-Guan Hsieh, Franck Iutzeler, Jérôme Malick, and Panayotis Mertikopoulos. On the convergence of single-call stochastic extra-gradient methods. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/bit/4625d8e31dad7d1c4c83399a6eb62f0c-Abstract.html.
380
+ Chi Jin, Praneeth Netrapalli, and Michael Jordan. What is local optimality in nonconvex-nonconcave minimax optimization? In International conference on machine learning (ICML), pp. 4880-4889. PMLR, 2020.
381
+ Donghwan Kim. Accelerated proximal point method for maximally monotone operators. Mathematical Programming, 190(1):57-87, November 2021. ISSN 1436-4646. doi: 10.1007/s10107-021-01643-0. URL https://doi.org/10.1007/s10107-021-01643-0.
382
+
383
+ G. M. Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 12:747-756, 1976. URL https://ci.nii.ac.jp/naid/10017556617/.
384
+ Sucheol Lee and Donghwan Kim. Fast extra gradient methods for smooth structured nonconvex-nonconcave minimax problems. In Annual Conference on Neural Information Processing Systems (NeurIPS), 2021a.
385
+ Sucheol Lee and Donghwan Kim. Fast Extra Gradient Methods for Smooth Structured Nonconvex-Nonconcave Minimax Problems. Technical Report arXiv:2106.02326, arXiv, November 2021b. URL http://arxiv.org/abs/2106.02326. arXiv:2106.02326 [math] type: article.
386
+ Mingrui Liu, Youssef Mroueh, Jerret Ross, Wei Zhang, Xiaodong Cui, Payel Das, and Tianbao Yang. Towards better understanding of adaptive gradient algorithms in generative adversarial nets. In International Conference on Learning Representations (ICLR), 2019.
387
+ Mingrui Liu, Hassan Rafique, Qihang Lin, and Tianbao Yang. First-order convergence theory for weakly-convex-weakly-concave min-max problems. Journal of Machine Learning Research, 22(169):1-34, 2021.
388
+ Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. Towards deep learning models resistant to adversarial attacks. arXiv preprint arXiv:1706.06083, 2017.
389
+ Yu Malitsky. Projected Reflected Gradient Methods for Monotone Variational Inequalities. SIAM Journal on Optimization, 25(1):502-520, January 2015. ISSN 1052-6234, 1095-7189. doi: 10.1137/14097238X. URL http://arxiv.org/abs/1502.04968. arXiv: 1502.04968.
390
+ Yura Malitsky. Golden ratio algorithms for variational inequalities. Mathematical Programming, 184(1):383-410, 2020.
391
+ Yura Malitsky and Matthew K. Tam. A Forward-Backward Splitting Method for Monotone Inclusions Without Cocoercivity. SIAM Journal on Optimization, 30(2):1451-1472, January 2020. ISSN 1052-6234. doi: 10.1137/18M1207260. URL https://epubs.siam.org/doi/abs/10.1137/18M1207260. Publisher: Society for Industrial and Applied Mathematics.
392
+ Eric V Mazumdar, Michael I Jordan, and S Shankar Sastry. On finding local nash equilibria (and only local nash equilibria) in zero-sum games. arXiv preprint arXiv:1901.00838, 2019.
393
+ Aryan Mokhtari, Asuman Ozdaglar, and Sarath Pattathil. A unified analysis of extragradient and optimistic gradient methods for saddle point problems: Proximal point approach. In International Conference on Artificial Intelligence and Statistics (AISTATS), 2020a.
394
+ Aryan Mokhtari, Asuman E Ozdaglar, and Sarath Pattathil. Convergence rate of $\mathcal{O}(1 / k)$ for optimistic gradient and extragradient methods in smooth convex-concave saddle point problems. SIAM Journal on Optimization, 30(4):3230-3251, 2020b.
395
+ Arkadi Nemirovski. Prox-method with rate of convergence O $(1 / t)$ for variational inequalities with Lipschitz continuous monotone operators and smooth convex-concave saddle point problems. SIAM Journal on Optimization, 15(1):229-251, 2004. Publisher: SIAM.
396
+ Yurii Nesterov. Dual extrapolation and its applications to solving variational inequalities and related problems. Mathematical Programming, 109(2):319-344, March 2007. ISSN 1436-4646. doi: 10.1007/s10107-006-0034-z. URL https://doi.org/10.1007/s10107-006-0034-z.
397
+ Maher Nouiehed, Maziar Sanjabi, Tianjian Huang, Jason D Lee, and Meisam Razaviyayn. Solving a class of non-convex min-max games using iterative first order methods. Advances in Neural Information Processing Systems (NeurIPS), 32, 2019.
398
+
399
+ Yuyuan Ouyang and Yangyang Xu. Lower complexity bounds of first-order methods for convex-concave bilinear saddle-point problems. Mathematical Programming, 185(1):1-35, 2021.
400
+ Christos H. Papadimitriou. On the complexity of the parity argument and other inefficient proofs of existence. Journal of Computer and System Sciences, 48(3):498-532, June 1994. ISSN 0022-0000. doi: 10.1016/S0022-0000(05)80063-7. URL https://www.sciencedirect.com/science/article/pii/S0022000005800637.
401
+ Neal Parikh and Stephen Boyd. Proximal Algorithms. Foundations and Trends® in Optimization, 1(3):127-239, January 2014. ISSN 2167-3888, 2167-3918. doi: 10. 1561/2400000003. URL https://www-nowpublishers.com/article/Details/ OPT-003. Publisher: Now Publishers, Inc.
402
+ Wei Peng, Yu-Hong Dai, Hui Zhang, and Lizhi Cheng. Training GANs with centripetal acceleration. Optimization Methods and Software, 35(5):955-973, 2020. Publisher: Taylor & Francis.
403
+ Thomas Pethick, Puya Latafat, Panagiotis Patrinos, Olivier Fercoq, and Volkan Cevhera. Escaping limit cycles: Global convergence for constrained nonconvex-nonconcave minimax problems. In International Conference on Learning Representations (ICLR), 2022.
404
+ Leonid Denisovich Popov. A modification of the Arrow-Hurwicz method for search of saddle points. Mathematical notes of the Academy of Sciences of the USSR, 28(5):845-848, 1980. Publisher: Springer.
405
+ Ernest K Ryu and Stephen Boyd. Primer on monotone operator methods. Appl. Comput. Math, 15(1):3-43, 2016.
406
+ Ernest K. Ryu and Wotao Yin. Large-Scale Convex Optimization via Monotone Operators. Cambridge University Press, 2022.
407
+ Chaobing Song, Zhengyuan Zhou, Yichao Zhou, Yong Jiang, and Yi Ma. Optimistic dual extrapolation for coherent non-monotone variational inequalities. Advances in Neural Information Processing Systems (NeurIPS), 33:14303-14314, 2020.
408
+ Quoc Tran-Dinh. The Connection Between Nesterov's Accelerated Methods and Halpern Fixed-Point Iterations, March 2022. URL http://arxiv.org/abs/2203.04869.arXiv:2203.04869 [math].
409
+ Quoc Tran-Dinh and Yang Luo. Halpern-Type Accelerated and Splitting Algorithms For Monotone Inclusions. Technical Report arXiv:2110.08150, arXiv, December 2021. URL http://arxiv.org/abs/2110.08150. arXiv:2110.08150 [math, stat] type: article.
410
+ Junchi Yang, Negar Kiyavash, and Niao He. Global convergence and variance reduction for a class of nonconvex-nonconcave minimax problems. Advances in Neural Information Processing Systems (NeurIPS), 33:1153-1165, 2020.
411
+ TaeHo Yoon and Ernest K Ryu. Accelerated algorithms for smooth convex-concave minimax problems with $\mathcal{O}(1 / k^2)$ rate on squared gradient norm. In International Conference on Machine Learning (ICML), pp. 12098-12109. PMLR, 2021.
412
+ TaeHo Yoon and Ernest K Ryu. Accelerated minimax algorithms flock together. arXiv preprint arXiv:2205.11093, 2022.
413
+ Zhengyuan Zhou, Panayotis Mertikopoulos, Nicholas Bambos, Stephen Boyd, and Peter W Glynn. Stochastic mirror descent in variationally coherent optimization problems. Advances in Neural Information Processing Systems (NeurIPS), 30, 2017.
414
+
415
+ # CONTENTS
416
+
417
+ # 1 Introduction 1
418
+
419
+ 1.1 Our Contributions 2
420
+ 1.2 Related Works 3
421
+
422
+ # 2 Preliminaries 4
423
+
424
+ 2.1 Problem Formulation 5
425
+ 2.2 Convergence Measure 6
426
+
427
+ # 3 Optimistic Gradient Method for Weak MVI Problems 6
428
+
429
+ # 4 Accelerated Reflected Gradient For Negatively Comonotone Problems 8
430
+
431
+ # 5 Last-Iterate Convergence Rate of Reflected Gradient 9
432
+
433
+ # 6 Conclusion 9
434
+
435
+ # A Additional Related Works 15
436
+
437
+ A.1 Convex-Concave and Monotone Setting 15
438
+ A.2 Nonconvex-Nonconcave Setting 15
439
+
440
+ # B Additional Preliminary 15
441
+
442
+ B.1 Resolvent and Proximal Operator 15
443
+ B.2 Gap Function 16
444
+ B.3 Classical Algorithms for Variational Inequalities 16
445
+ B.4 Tangent Residual Upper Bounds Other Notions of Residual 17
446
+
447
+ # C Missing Proofs in Section 4 17
448
+
449
+ C.1 Potential Function 17
450
+ C.2 Approximately Non-Increasing Potential 18
451
+ C.3 Bounding Potential at Iteration 1 20
452
+ C.4 Proof of Theorem 2 21
453
+
454
+ # D Missing Proofs in Section 5 22
455
+
456
+ D.1 Non-increasing Potential 22
457
+ D.2 Best-Iterate Convergence 23
458
+ D.3 Proof of Theorem 3 25
459
+
460
+ # E Numerical Illustration 25
461
+
462
+ # F Auxiliary Propositions 26
463
+
464
+ # A ADDITIONAL RELATED WORKS
465
+
466
+ # A.1 CONVEX-CONCAVE AND MONOTONE SETTING
467
+
468
+ In the convex-concave setting, a weak convergence measure is the gap function (Definition 4). It is well-known that classic extragradient-type methods such as EG and PEG have $O\left(\frac{1}{T}\right)$ average-iterate convergence rate in terms of gap function (Nemirovski, 2004; Nesterov, 2007; Mokhtari et al., 2020b; Hsieh et al., 2019) and the rate is optimal (Ouyang & Xu, 2021). But the gap function or average-iterate convergence is not meaningful in the nonconvex-nonconcave setting. For convergence in terms of the residual in the constrained setting, EG and PEG have a slower rate of $O\left(\frac{1}{\sqrt{T}}\right)$ for best-iterate convergence (Korpelevich, 1976; Popov, 1980; Facchinei & Pang, 2003; Hsieh et al., 2019) and the more desirable last-iterate convergence (Cai et al., 2022b; Gorbunov et al., 2022b). We remark that the last-iterate convergence rate of the reflected gradient (RG) algorithm was unknown. The $O\left(\frac{1}{\sqrt{T}}\right)$ rate is tight for $p$ -SCIL algorithms (Golowich et al., 2020), a subclass of first-order methods that includes EG, PEG, and many of its variations, but faster rate is possible for other first-order methods.
469
+
470
+ Accelerated Convergence Rate in Residual. Recent results with accelerated convergence rates in terms of the residual are based on Halpern iteration (Halpern, 1967) (also called Anchoring). The vanilla Halpern iteration has $O\left(\frac{1}{T}\right)$ convergence rate for cocoercive operators (stronger than monotonicity) (Diakonikolas, 2020; Kim, 2021). Recently, a line of works contributed to provide $O\left(\frac{1}{T}\right)$ convergence rate for monotone operators in the constrained setting. Diakonikolas (2020); Yoon & Ryu (2022) provide double-loop algorithms with $O\left(\frac{\log T}{T}\right)$ convergence rate for monotone operators in the constrained setting. In the unconstrained setting ( $A = 0$ ), Yoon & Ryu (2021) propose the Extra Anchored Gradient (EAG) algorithm, the first efficient algorithm with $O\left(\frac{1}{T}\right)$ convergence rate for monotone operators. They also establish matching lower bound for first-order methods. Lee & Kim (2021a) generalize EAG to Fast Extragradient (FEG), which works even for negatively comonotone operators but still in the unconstrained setting. Analysis for variants of EAG and FEG in the unconstrained setting is provided in (Tran-Dinh & Luo, 2021; Tran-Dinh, 2022). Recently, Cai et al. (2022a) close the open problem by proving the projected version of EAG has $O\left(\frac{1}{T}\right)$ convergence rate. They also propose the accelerated forward-backward splitting (AS) algorithm, a generalization of FEG, which has $O\left(\frac{1}{T}\right)$ convergence rate for negatively comonotone operators in the constrained setting.
471
+
472
+ # A.2 NONCONVEX-NONCONCAVE SETTING
473
+
474
+ This paper studies structured nonconvex-nonconcave optimization problems from the general perspective of operator theory and focuses on global convergence under weak MVI and negative comonotonicity. There is a line of works focusing on local convergence, e.g., (Heusel et al., 2017; Mazumdar et al., 2019; Jin et al., 2020; Fiez & Ratliff, 2021). Another line of works focuses on problems satisfying different structural assumptions, such as the Polyak Łojasiewicz condition (Nouiehed et al., 2019; Yang et al., 2020).
475
+
476
+ # B ADDITIONAL PRELIMINARY
477
+
478
+ # B.1 RESOLVENT AND PROXIMAL OPERATOR
479
+
480
+ When $A = \partial g$ is the subdifferential operator of a lower semi-continuous, proper, and convex function $g$ , its resolvent $(I + \lambda \partial g)^{-1}$ is also known as the proximal operator of $g$ denoted as $\mathbf{prox}_{\lambda g}$ . The resolvent $(I + \lambda \partial g)^{-1}$ is efficiently computable for the following popular choices of function $g$ : $\ell_1$ -norm $||\cdot||_1$ , $\ell_2$ -norm $||\cdot||_2$ , matrix norms, the log-barrier $-\sum_{i=1}^{n}\log(x_i)$ , and more generally any quadratic or smooth functions. Moreover, many of them have closed-form expressions. For example, the proximal operator of the $\ell_1$ -norm $g =$
481
+
482
+ $\| \cdot \| _1$ is the element-wise soft-thresholding operator $(\mathbf{prox}_{\lambda g}(v))_i = (v_i - \lambda)_+ - (-v_i - \lambda)_+$ . We refer readers to (Parikh & Boyd, 2014, Chapter 6, 7) for a comprehensive review on proximal operators and their efficient computation.
483
+
484
+ # B.2 GAP FUNCTION
485
+
486
+ A standard suboptimality measure for the variational inequality (VI) problem is the gap function defined as $\mathrm{GAP}_{\mathcal{Z},F}(z) \coloneqq \max_{z' \in \mathcal{Z}} \langle F(z), z - z' \rangle$ . Note that when the feasible set $\mathcal{Z}$ is unbounded, approximating the gap function is impossible: consider the simple unconstrained saddle point problem $\min_{x \in \mathbb{R}} \max_{y \in \mathbb{R}} xy$ , which has a unique saddle point $(0,0)$ but any other point has an infinitely large gap. A refined notion is the following restricted gap function (Nesterov, 2007), which is meaningful for unbounded $\mathcal{Z}$ .
487
+
488
+ Definition 4 (Restricted Gap Function). Given a closed convex set $\mathcal{Z}$ , a single-valued operator $F$ , and a radius $D$ , the restricted gap function at point $z \in \mathcal{Z}$ is
489
+
490
+ $$
491
+ \operatorname {GAP}_{\mathcal{Z},F,D}(z):= \max_{z^{\prime}\in \mathcal{Z}\cap \mathcal{B}(z,D)}\big\langle F(z),z - z^{\prime}\big\rangle
492
+ $$
493
+
494
+ where $\mathcal{B}(z,D)$ is a Euclidean ball centered at $z$ with radius $D$ .
495
+
496
+ In the rest of the paper, we call $\mathrm{GAP}_{\mathcal{Z},F,D}$ the gap function (or gap) for convenience. The following lemma relates $\| F(z) + c\|$ , where $c\in N_{\mathcal{Z}}(z)$ , to the gap function.
497
+
498
+ Lemma 1. Let $\mathcal{Z}$ be a closed convex set and $F$ be a monotone and $L$-Lipschitz operator. For any $z\in \mathcal{Z}$ and $c\in N_{\mathcal{Z}}(z)$ , we have
499
+
500
+ $$
501
+ \operatorname {GAP}_{\mathcal{Z},F,D}(z):= \max_{z^{\prime}\in \mathcal{Z}\cap \mathcal{B}(z,D)}\big\langle F(z),z - z^{\prime}\big\rangle \leq D\cdot \| F(z) + c\| .
502
+ $$
503
+
504
+ Proof. The proof is straightforward. Since $c \in N_{\mathcal{Z}}(z)$ , we have $\langle c, z - z' \rangle \geq 0$ for any $z' \in \mathcal{Z}$ . Therefore,
505
+
506
+ $$
507
+ \max _ {z ^ {\prime} \in \mathcal {Z} \cap \mathcal {B} (z, D)} \left\langle F (z), z - z ^ {\prime} \right\rangle \leq \max _ {z ^ {\prime} \in \mathcal {Z} \cap \mathcal {B} (z, D)} \left\langle F (z) + c, z - z ^ {\prime} \right\rangle
508
+ $$
509
+
510
+ $$
511
+ \leq \max _ {z ^ {\prime} \in \mathcal {Z} \cap \mathcal {B} (z, D)} \| z - z ^ {\prime} \| \cdot \| F (z) + c \|
512
+ $$
513
+
514
+ (Cauchy-Schwarz inequality)
515
+
516
+ $$
517
+ \leq D \cdot \| F (z) + c \|.
518
+ $$
519
+
520
+ ![](images/0ae0cbbb4fe389eb501dd44a61633b8d9e0607baee287f2fdb1c0d6cf593026c.jpg)
521
+
522
+ # B.3 CLASSICAL ALGORITHMS FOR VARIATIONAL INEQUALITIES
523
+
524
+ The Extragradient Algorithm (Korpelevich, 1976). Starting at initial point $z_0 \in \mathcal{Z}$ , the update rule of EG is: for $t = 0, 1, 2, \dots$
525
+
526
+ $$
527
+ z _ {t + \frac {1}{2}} = \Pi_ {\mathcal {Z}} [ z _ {t} - \eta F (z _ {t}) ],
528
+ $$
529
+
530
+ $$
531
+ z _ {t + 1} = \Pi_ {\mathcal {Z}} \left[ z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) \right]. \tag {EG}
532
+ $$
533
+
534
+ At each step $t \geq 0$ , the EG algorithm makes an oracle call of $F(z_{t})$ to produce an intermediate point $z_{t + \frac{1}{2}}$ (a gradient descent step if $F = \partial f$ is the gradient of some function $f$ ), then the algorithm makes another oracle call $F(z_{t + \frac{1}{2}})$ and updates $z_{t}$ to $z_{t + 1}$ . In each step, EG needs two oracle calls to $F$ and two projections $\Pi_{\mathcal{Z}}$ .
535
+
536
+ The Past Extragradient Algorithm (Popov, 1980) Starting at initial point $z_0 = z_{-\frac{1}{2}} \in \mathcal{Z}$ , the update rule of PEG with step size $\eta > 0$ is: for $t = 0, 1, 2, \dots$
537
+
538
+ $$
539
+ z _ {t + \frac {1}{2}} = \Pi_ {\mathcal {Z}} \left[ z _ {t} - \eta F \left(z _ {t - \frac {1}{2}}\right) \right], \tag {PEG}
540
+ $$
541
+
542
+ $$
543
+ z _ {t + 1} = \Pi_ {\mathcal {Z}} \left[ z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) \right].
544
+ $$
545
+
546
+ Note that PEG is also known as the Optimistic Gradient Descent/Ascent (OGDA) algorithm in the literature. The update rule of PEG is similar to (EG) but only requires a single call to $F$ in each iteration. Both of EG and PEG perform two projections in every iteration.
547
+
548
+ # B.4 TANGENT RESIDUAL UPPER BOUNDS OTHER NOTIONS OF RESIDUAL
549
+
550
+ Proposition 2. Let $A$ be a maximally monotone operator and $F$ be a single-valued operator. Then for any $z \in \mathbb{R}^n$ and $\alpha > 0$ ,
551
+
552
+ $$
553
+ r _ {F, A} ^ {t a n} (z) \geq r _ {F, A} ^ {n a t} (z) := \| z - J _ {A} (z - F (z)) \|
554
+ $$
555
+
556
+ $$
557
+ r _ {F, A} ^ {t a n} (z) \geq r _ {F, A, \alpha} ^ {f b} (z) := \frac {1}{\alpha} \| z - J _ {\alpha A} [ z - \alpha F (z) ] \|.
558
+ $$
559
+
560
+ Proof. For any $c \in A(z)$ , we have
561
+
562
+ $$
563
+ \begin{array}{l} r _ {F, A} ^ {n a t} (z) = \left\| z - J _ {A} (z - F (z)) \right\| \\ = \| J _ {A} (z + c) - J _ {A} (z - F (z)) \| \\ \leq \| F (z) + c \| \quad (J _ {A} \text { is non-expansive}) \\ \end{array}
564
+ $$
565
+
566
+ and
567
+
568
+ $$
569
+ \begin{array}{l} r _ {F, A, \alpha} ^ {f b} (z) = \frac {1}{\alpha} \| z - J _ {\alpha A} (z - \alpha F (z)) \| \\ = \frac {1}{\alpha} \| J _ {\alpha A} (z + \alpha c) - J _ {\alpha A} (z - \alpha F (z)) \| \\ \leq \| F (z) + c \|. \quad \left(J _ {\alpha A} \text { is non-expansive}\right) \\ \end{array}
570
+ $$
571
+
572
+ Since the bounds above hold for every $c \in A(z)$ , both $r_{F,A}^{nat}(z)$ and $r_{F,A,\alpha}^{fb}(z)$ are at most $r_{F,A}^{tan}(z) = \min_{c\in A(z)}\| F(z) + c\|$ .
573
+
574
+ # C MISSING PROOFS IN SECTION 4
575
+
576
+ To prove Theorem 2, we apply a potential function argument. We first show the potential function is approximately non-increasing and then prove that it is upper bounded by a term independent of $T$ . As the potential function at step $t$ is also at least $\Omega(t^2) \cdot r^{tan}(z_t)^2$ , we conclude that $\mathrm{ARG}$ has an $O\left(\frac{1}{T}\right)$ convergence rate.
577
+
578
+ # C.1 POTENTIAL FUNCTION
579
+
580
+ Recall the update rule of ARG: $z_0 = z_{\frac{1}{2}} \in \mathbb{R}^n$ are initial points and $z_1 = J_{\eta A}[z_0 - \eta F(z_0)]$ ; for $t \geq 1$ ,
581
+
582
+ $$
583
+ z _ {t + \frac {1}{2}} = 2 z _ {t} - z _ {t - 1} + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right) - \frac {1}{t} \left(z _ {0} - z _ {t - 1}\right), \tag {ARG}
584
+ $$
585
+
586
+ $$
587
+ z _ {t + 1} = J _ {\eta A} \Big [ z _ {t} - \eta F (z _ {t + \frac {1}{2}}) + \frac {1}{t + 1} (z _ {0} - z _ {t}) \Big ].
588
+ $$
589
+
590
+ Recall that when $A$ is the normal cone of a closed convex set $\mathcal{Z}$ , the resolvent $J_{A}$ is equivalent to Euclidean projection to set $\mathcal{Z}$ . Hence, if we apply the ARG algorithm to solve monotone VI problems, the algorithm uses a single call to operator $F$ and a single projection to $\mathcal{Z}$ per iteration. Here we allow $A$ to be an arbitrary maximally monotone operator, and the ARG algorithm becomes a single-call single-resolvent algorithm in this more general setting.
591
+
592
+ Next, we specify the potential function. Define
593
+
594
+ $$
595
+ c _ {t + 1} := \frac {z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right) - z _ {t + 1}}{\eta}, \quad \forall t \geq 0. \tag {8}
596
+ $$
597
+
598
+ By update rule we have $c_{t} \in A(z_{t})$ for all $t \geq 1$ . The potential function at iterate $t \geq 1$ is defined as
599
+
600
+ $$
601
+ V _ {t} := \frac {t (t + 1)}{2} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} + \frac {t (t + 1)}{2} \left\| \eta F (z _ {t}) - \eta F \left(z _ {t - \frac {1}{2}}\right) \right\| ^ {2} + t \langle \eta F (z _ {t}) + \eta c _ {t}, z _ {t} - z _ {0} \rangle . \tag {9}
602
+ $$
603
+
604
+ # C.2 APPROXIMATELY NON-INCREASING POTENTIAL
605
+
606
+ Fact 2. For any $L > 0$ and $\rho \geq -\frac{1}{60L}$ , there exists $\eta > 0$ such that
607
+
608
+ $$
609
+ \frac {1}{2} - \left(1 2 - \frac {4 \rho}{\eta}\right) \eta^ {2} L ^ {2} + \frac {2 \rho}{\eta} \geq 0. \tag {10}
610
+ $$
611
+
612
+ Moreover, every $\eta > 0$ that satisfies (10) also satisfies $\frac{\rho}{\eta} \geq -\frac{1}{4}$.
613
+
614
+ Proof. Rewriting (10), we get
615
+
616
+ $$
617
+ \rho > \frac {\eta L (2 4 \eta^ {2} L ^ {2} - 1)}{4 + 8 \eta^ {2} L ^ {2}} \cdot \frac {1}{L}.
618
+ $$
619
+
620
+ Let $x = \eta L$ and $f(x) = \frac{x(24x^2 - 1)}{4 + 8x^2}$ . Since $f\left(\frac{1}{12}\right) = -\frac{5}{292} < -\frac{1}{60}$ and $\rho \geq -\frac{1}{60L}$ , we know $\eta = \frac{1}{12L}$ satisfies (10).
621
+
622
+ Moreover, rewriting (10) and using $\eta L > 0$ , we get
623
+
624
+ $$
625
+ \frac {\rho}{\eta} \geq - \frac {1 - 7 2 \eta^ {2} L ^ {2}}{4 + 8 \eta^ {2} L ^ {2}} \geq - \frac {1}{4}.
626
+ $$
627
+
628
+ ![](images/6ff84b5364a62d0936416c6dea2730b4d632f6592c09490fecb0a0f7f1180b3c.jpg)
629
+
630
+ We show in the following lemma that $V_{t}$ is approximately non-increasing.
631
+
632
+ Lemma 2. In the same setup as Theorem 2, for any $t \geq 1$ , we have
633
+
634
+ $$
635
+ V _ {t + 1} \leq V _ {t} + \frac {1}{8} \cdot \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} \| ^ {2}.
636
+ $$
637
+
638
+ Proof. The plan is to show that $V_{t} - V_{t + 1}$ plus a few non-positive terms is still $\geq -\frac{1}{8} \cdot \| \eta F(z_{t + 1}) + \eta c_{t + 1} \|^2$ , which certifies the claim.
639
+
640
+ Two Non-Negative Terms. Since $F + A$ is $\rho$ -comonotone, we have
641
+
642
+ $$
643
+ \left\langle \eta F \left(z _ {t + 1}\right) + \eta c _ {t + 1} - \eta F \left(z _ {t}\right) - \eta c _ {t}, z _ {t + 1} - z _ {t} \right\rangle - \frac {\rho}{\eta} \| \eta F \left(z _ {t + 1}\right) + \eta c _ {t + 1} - \eta F \left(z _ {t}\right) - \eta c _ {t} \| ^ {2} \geq 0. \tag {11}
644
+ $$
645
+
646
+ Since $F$ is $L$ -Lipschitz, we have
647
+
648
+ $$
649
+ \eta^ {2} L ^ {2} \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - \left\| \eta F (z _ {t + 1}) - \eta F (z _ {t + \frac {1}{2}}) \right\| ^ {2} \geq 0.
650
+ $$
651
+
652
+ Denote $p = \frac{1}{24}$ . Multiplying the above inequality with $1 - \frac{\rho}{3\eta} > 0$ and rearranging terms, we get
653
+
654
+ $$
655
+ \begin{array}{l} p \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - \left\| \eta F (z _ {t + 1}) - \eta F (z _ {t + \frac {1}{2}}) \right\| ^ {2} \\ + \left((1 - \frac {\rho}{3 \eta}) \eta^ {2} L ^ {2} - p\right) \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} + \frac {\rho}{3 \eta} \left\| \eta F \left(z _ {t + 1}\right) - \eta F \left(z _ {t + \frac {1}{2}}\right) \right\| ^ {2} \geq 0. \tag {12} \\ \end{array}
656
+ $$
657
+
658
+ Sum-of-Squares Identity. We show an equivalent formulation of $z_{t + \frac{1}{2}}$ and $z_{t + 1}$ using the definitions $\eta c_t = z_{t - 1} - z_t - \eta F(z_{t - \frac{1}{2}}) + \frac{1}{t} (z_0 - z_{t - 1})$ and $\eta c_{t + 1} = z_t - \eta F(z_{t + \frac{1}{2}}) + \frac{1}{t + 1} (z_0 - z_t) - z_{t + 1}$ :
659
+
660
+ $$
661
+ \begin{array}{l} z _ {t + \frac {1}{2}} = 2 z _ {t} - z _ {t - 1} + \frac {1}{t + 1} (z _ {0} - z _ {t}) - \frac {1}{t} (z _ {0} - z _ {t - 1}) \\ = z _ {t} + \left(z _ {t} - z _ {t - 1}\right) + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right) - \frac {1}{t} \left(z _ {0} - z _ {t - 1}\right) \\ = z _ {t} - \eta F \left(z _ {t - \frac {1}{2}}\right) - \eta c _ {t} + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right), \\ \end{array}
662
+ $$
663
+
664
+ $$
665
+ z _ {t + 1} = z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta c _ {t + 1} + \frac {1}{t + 1} \left(z _ {0} - z _ {t}\right).
666
+ $$
667
+
668
+ We also have
669
+
670
+ $$
671
+ z _ {t + 1} - z _ {t + \frac {1}{2}} = \eta F \left(z _ {t - \frac {1}{2}}\right) + \eta c _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta c _ {t + 1}. \tag {13}
672
+ $$
673
+
674
+ Next, we simplify
675
+
676
+ $$
677
+ V _ {t} - V _ {t + 1} - t (t + 1) \times \text {L H S} (1 1) - \frac {t (t + 1)}{4 p} \times \text {L H S} (1 2)
678
+ $$
679
+
680
+ using the second identity in Proposition 3: replace $x_0$ with $z_0$ ; for $k \in [4]$ , replace $x_k$ with $z_{t-1+\frac{k}{2}}$ and replace $y_k$ with $\eta F(z_{t-1+\frac{k}{2}})$ ; replace $u_2$ with $\eta c_t$ ; replace $u_4$ with $\eta c_{t+1}$ ; replace $k$ with $t$ ; replace $p$ with $q$ . Note that $x_3 = x_2 - y_1 - u_2 + \frac{1}{k+1}(x_0 - x_2)$ and $x_4 = x_2 - y_3 - u_4 + \frac{1}{k+1}(x_0 - x_2)$ hold due to the above equivalent formations of $z_{t+\frac{1}{2}}$ and $z_{t+1}$ . Expression (17) and (18) appear on both sides of the following equation.
681
+
682
+ $$
683
+ \begin{array}{l} V _ {t} - V _ {t + 1} - t (t + 1) \times \text {L H S} (1 1) - \frac {t (t + 1)}{4 p} \times \text {L H S} (1 2) \\ = \frac {t (t + 1)}{4} \left\| \eta c _ {t + 1} - \eta c _ {t} + \eta F \left(z _ {t - \frac {1}{2}}\right) - 2 \eta F \left(z _ {t}\right) + \eta F \left(z _ {t + \frac {1}{2}}\right) \right\| ^ {2} (14) \\ + \left(\frac {(1 - 4 p) t - 4 p}{4 p} (t + 1)\right) \cdot \left\| \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta F \left(z _ {t + 1}\right) \right\| ^ {2} (15) \\ + (t + 1) \cdot \left\langle \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta F \left(z _ {t + 1}\right), \eta F \left(z _ {t + 1}\right) + \eta c _ {t + 1} \right\rangle (16) \\ + t (t + 1) \frac {\rho}{\eta} \cdot \left\| \eta F \left(z _ {t + 1}\right) + \eta c _ {t + 1} - \eta F \left(z _ {t}\right) - \eta c _ {t} \right\| ^ {2} (17) \\ \left. - \frac {t (t + 1)}{4 p} \cdot \left(\left((1 - \frac {\rho}{3 \eta}) \eta^ {2} L ^ {2} - p\right) \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} + \frac {\rho}{3 \eta} \left\| \eta F \left(z _ {t + 1}\right) - \eta F \left(z _ {t + \frac {1}{2}}\right) \right\| ^ {2}\right). \right. (18) \\ \end{array}
684
+ $$
685
+
686
+ Since $\| a\|^2 + \langle a, b \rangle = \| a + \frac{b}{2}\|^2 - \frac{\|b\|^2}{4}$ , we have
687
+
688
+ Expression (15) + Expression (16)
689
+
690
+ $$
691
+ \begin{array}{l} = \left\| \sqrt {\frac {(1 - 4 p) t - 4 p}{4 p} (t + 1)} \cdot \left(\eta F \left(z _ {t + \frac {1}{2}}\right) - \eta F \left(z _ {t + 1}\right)\right) + \sqrt {\frac {p (t + 1)}{(1 - 4 p) t - 4 p}} \cdot \left(\eta F \left(z _ {t + 1}\right) + \eta c _ {t + 1}\right) \right\| ^ {2} \\ - \frac {p (t + 1)}{(1 - 4 p) t - 4 p} \cdot \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} \| ^ {2} \\ \end{array}
692
+ $$
693
+
694
+ $$
695
+ \begin{array}{l} \geq - \frac {p (t + 1)}{(1 - 8 p) t} \cdot \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} \| ^ {2} \quad (t \geq 1) \\ \geq - \frac {2 p}{1 - 8 p} \cdot \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} \| ^ {2} \quad \left(\frac {t + 1}{t} \leq 2\right) \\ = - \frac {1}{8} \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} \| ^ {2}. \quad (p = \frac {1}{2 4}) \\ \end{array}
696
+ $$
697
+
698
+ Now it remains to show that the sum of Expression (14), (17), and (18) is non-negative. Multiplying $\frac{4}{t(t + 1)}$ and replacing $p = \frac{1}{24}$ , we get
699
+
700
+ $$
701
+ \begin{array}{l} \frac {4}{t (t + 1)} \cdot (\text {E x p r e s s i o n} (1 4) + \text {E x p r e s s i o n} (1 7) + \text {E x p r e s s i o n} (1 8)) \\ = \left\| \eta c _ {t + 1} - \eta c _ {t} + \eta F (z _ {t - \frac {1}{2}}) - 2 \eta F (z _ {t}) + \eta F (z _ {t + \frac {1}{2}}) \right\| ^ {2} + \left(1 - (2 4 - \frac {8 \rho}{\eta}) \eta^ {2} L ^ {2}\right) \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} \\ + \frac {4 \rho}{\eta} \cdot \| \eta F (z _ {t + 1}) + \eta c _ {t + 1} - \eta F (z _ {t}) - \eta c _ {t} \| ^ {2} - \frac {8 \rho}{\eta} \Big \| \eta F (z _ {t + 1}) - \eta F (z _ {t + \frac {1}{2}}) \Big \| ^ {2}. \\ \end{array}
702
+ $$
703
+
704
+ Denote
705
+
706
+ $$
707
+ B _ {1} = \eta c _ {t + 1} - \eta c _ {t} + \eta F (z _ {t - \frac {1}{2}}) - 2 \eta F (z _ {t}) + \eta F (z _ {t + \frac {1}{2}})
708
+ $$
709
+
710
+ $$
711
+ B _ {2} = z _ {t + 1} - z _ {t + \frac {1}{2}} = \eta F \left(z _ {t - \frac {1}{2}}\right) + \eta c _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta c _ {t + 1} \tag {By(13)}
712
+ $$
713
+
714
+ $$
715
+ B _ {3} = \eta F (z _ {t + 1}) + \eta c _ {t + 1} - \eta F (z _ {t}) - \eta c _ {t}
716
+ $$
717
+
718
+ $$
719
+ B _ {4} = \eta F (z _ {t + 1}) - \eta F (z _ {t + \frac {1}{2}}).
720
+ $$
721
+
722
+ It is not hard to check that $B_{1} - B_{2} = 2(B_{3} - B_{4})$ :
723
+
724
+ $$
725
+ B _ {1} - B _ {2} = 2 \eta c _ {t + 1} - 2 \eta c _ {t} - 2 \eta F (z _ {t}) + 2 \eta F (z _ {t + \frac {1}{2}}) = 2 (B _ {3} - B _ {4}).
726
+ $$
727
+
728
+ Note that $\rho$ is non-positive and we have
729
+
730
+ $$
731
+ \begin{array}{l} \frac {4}{t (t + 1)} \cdot (\text {E x p r e s s i o n} (1 4) + \text {E x p r e s s i o n} (1 7) + \text {E x p r e s s i o n} (1 8)) \\ = \| B _ {1} \| ^ {2} + \left(1 - (2 4 - \frac {8 \rho}{\eta}) \eta^ {2} L ^ {2}\right) \cdot \| B _ {2} \| ^ {2} + \frac {\rho}{\eta} \cdot \| 2 B _ {3} \| ^ {2} - \frac {2 \rho}{\eta} \| 2 B _ {4} \| ^ {2} \\ \geq \left(\frac {1}{2} - (1 2 - \frac {4 \rho}{\eta}) \eta^ {2} L ^ {2}\right) \cdot \| B _ {1} - B _ {2} \| ^ {2} + \frac {\rho}{\eta} \cdot \| 2 B _ {3} \| ^ {2} - \frac {2 \rho}{\eta} \| 2 B _ {4} \| ^ {2} \\ \left(\| a \| ^ {2} + \| b \| ^ {2} \geq \frac {1}{2} \| a - b \| ^ {2} \text {a n d} (2 4 - \frac {8 \rho}{\eta}) \eta^ {2} L ^ {2} \geq 0\right) \\ \geq \left(\frac {1}{2} - (1 2 - \frac {4 \rho}{\eta}) \eta^ {2} L ^ {2}\right) \cdot \| B _ {1} - B _ {2} \| ^ {2} + \frac {2 \rho}{\eta} \cdot \| 2 B _ {3} - 2 B _ {4} \| ^ {2} \\ \left(- \| a \| ^ {2} + 2 \| b \| ^ {2} \geq - 2 \| a - b \| ^ {2} \text {a n d} - \frac {\rho}{\eta} \geq 0\right) \\ = \left(\frac {1}{2} - (1 2 - \frac {4 \rho}{\eta}) \eta^ {2} L ^ {2} + \frac {2 \rho}{\eta}\right) \cdot \| B _ {1} - B _ {2} \| ^ {2} \quad (B _ {1} - B _ {2} = 2 (B _ {3} - B _ {4})) \\ \geq 0. \quad (\text {I n e q u a l i t y (1 0)}) \\ \end{array}
732
+ $$
733
+
734
+ The last inequality holds by the choice of $\eta$ as shown in Fact 2.
735
+
736
+ # C.3 BOUNDING POTENTIAL AT ITERATION 1
737
+
738
+ Lemma 3. Let $F$ be a L-Lipschitz operator and $A$ be a maximally monotone operator. For any $z_0 = z_{\frac{1}{2}} \in \mathbb{R}^n$ , $\eta \in (0, \frac{1}{2L})$ , and $z_1 = J_{\eta A}[z_0 - \eta F(z_0)]$ , we have the following
739
+
740
+ 1. $\| z_1 - z_0\| \leq \eta \cdot r_{F,A}^{tan}(z_0)$
741
+ 2. $\| \eta F(z_1) + \eta c_1\| \leq (1 + \eta L)\| z_1 - z_0\|$
742
+ 3. $V_{1}\leq 4\| z_{1} - z_{0}\|^{2}$ where $V_{1}$ is defined in (9).
743
+
744
+ Proof. For any $c \in A(z_0)$ , due to non-expansiveness of $J_{\eta A}$ , we have
745
+
746
+ $$
747
+ \left\| z _ {1} - z _ {0} \right\| = \left\| J _ {\eta A} \left[ z _ {0} - \eta F (z _ {0}) \right] - J _ {\eta A} \left[ z _ {0} + \eta c \right] \right\| \leq \eta \| F (z _ {0}) + c \|.
748
+ $$
749
+
750
+ Thus $\| z_1 - z_0\| \leq \eta \cdot r_{F,A}^{tan}(z_0)$
751
+
752
+ By definition of $V_{1}$ in (9), we have
753
+
754
+ $$
755
+ V _ {1} = \left\| \eta F (z _ {1}) + \eta c _ {1} \right\| ^ {2} + \left\| \eta F (z _ {1}) - \eta F (z _ {0}) \right\| ^ {2} + \langle \eta F (z _ {1}) + \eta c _ {1}, z _ {1} - z _ {0} \rangle .
756
+ $$
757
+
758
+ We bound $\| \eta F(z_1) + \eta c_1\|$ first. Note that by definition, we have $\eta c_{1} = z_{0} - \eta F(z_{0}) - z_{1}$ . Thus we have
759
+
760
+ $$
761
+ \begin{array}{l} \left\| \eta F \left(z _ {1}\right) + \eta c _ {1} \right\| = \left\| z _ {0} - z _ {1} + \eta F \left(z _ {1}\right) - \eta F \left(z _ {0}\right) \right\| \\ \leq \left\| z _ {0} - z _ {1} \right\| + \left\| \eta F \left(z _ {1}\right) - \eta F \left(z _ {0}\right) \right\| \quad (\text {triangle inequality}) \\ \leq \left(1 + \eta L\right) \| z _ {1} - z _ {0} \|. \quad (F \text { is } L \text {-Lipschitz}) \\ \end{array}
762
+ $$
763
+
764
+ Then we can apply the bound on $\| \eta F(z_1) + \eta c_1 \|$ to bound $V_1$ as follows:
765
+
766
+ $$
767
+ \begin{array}{l} V _ {1} = \left\| \eta F (z _ {1}) + \eta c _ {1} \right\| ^ {2} + \left\| \eta F (z _ {1}) - \eta F (z _ {0}) \right\| ^ {2} + \langle \eta F (z _ {1}) + \eta c _ {1}, z _ {1} - z _ {0} \rangle \\ \leq \left\| \eta F (z _ {1}) + \eta c _ {1} \right\| ^ {2} + \eta^ {2} L ^ {2} \left\| z _ {1} - z _ {0} \right\| ^ {2} + \left\| \eta F (z _ {1}) + \eta c _ {1} \right\| \left\| z _ {1} - z _ {0} \right\| \\ \leq (1 + \eta L) ^ {2} \| z _ {1} - z _ {0} \| ^ {2} + \eta^ {2} L ^ {2} \| z _ {1} - z _ {0} \| ^ {2} + (1 + \eta L) \| z _ {1} - z _ {0} \| ^ {2} \\ = \left(2 + 3 \eta L + 2 \eta^ {2} L ^ {2}\right) \| z _ {1} - z _ {0} \| ^ {2} \\ \leq 4 \left\| z _ {1} - z _ {0} \right\| ^ {2}. \\ \end{array}
768
+ $$
769
+
770
+ where we use $L$ -Lipschitzness of $F$ and Cauchy-Schwarz inequality in the first inequality; we use $\| \eta F(z_1) + \eta c_1 \| \leq (1 + \eta L) \| z_1 - z_0 \|$ in the second inequality; we use $\eta L \leq \frac{1}{2}$ in the last inequality.
771
+
772
+ # C.4 PROOF OF THEOREM 2
773
+
774
+ We first show that the potential function $V_{t} = \Omega (t^{2}\cdot r^{tan}(z_{t})^{2})$ .
775
+
776
+ Lemma 4. In the same setup as Theorem 2, for any $t \geq 1$ , we have
777
+
778
+ $$
779
+ \frac {t \left(t + \frac {1}{2}\right)}{4} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} \leq V _ {t} + \| z ^ {*} - z _ {0} \| ^ {2}.
780
+ $$
781
+
782
+ Proof. Since $0 \in F(z^{*}) + A(z^{*})$ , by $\rho$ -comonotonicity of $F + A$ and Fact 2, we have
783
+
784
+ $$
785
+ \left\langle \eta F \left(z _ {t}\right) + \eta c _ {t}, z _ {t} - z ^ {*} \right\rangle \geq \frac {\rho}{\eta} \| \eta F \left(z _ {t}\right) + \eta c _ {t} \| ^ {2} \geq - \frac {1}{4} \| \eta F \left(z _ {t}\right) + \eta c _ {t} \| ^ {2}. \tag {19}
786
+ $$
787
+
788
+ By definition of $V_{t}$ in (9), for any $t\geq 1$ , we have
789
+
790
+ $$
791
+ \begin{array}{l} V _ {t} = \frac {t (t + 1)}{2} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} + \frac {t (t + 1)}{2} \left\| \eta F (z _ {t}) - \eta F \left(z _ {t - \frac {1}{2}}\right) \right\| ^ {2} + t \langle \eta F (z _ {t}) + \eta c _ {t}, z _ {t} - z _ {0} \rangle \\ \geq \frac {t (t + 1)}{2} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} + t \langle \eta F (z _ {t}) + \eta c _ {t}, z _ {t} - z ^ {*} \rangle + t \langle \eta F (z _ {t}) + \eta c _ {t}, z ^ {*} - z _ {0} \rangle \\ \geq \frac {t (t + 1)}{2} \| \eta F \left(z _ {t}\right) + \eta c _ {t} \| ^ {2} - \frac {1}{4} \| \eta F \left(z _ {t}\right) + \eta c _ {t} \| ^ {2} + t \langle \eta F \left(z _ {t}\right) + \eta c _ {t}, z ^ {*} - z _ {0} \rangle \tag {By Inequality(19)} \\ \geq \frac {t (t + \frac {1}{2})}{2} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} - \frac {t (t + \frac {1}{2})}{4} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} - \frac {t}{t + \frac {1}{2}} \| z ^ {*} - z _ {0} \| ^ {2} \\ \geq \frac {t \left(t + \frac {1}{2}\right)}{4} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2} - \| z ^ {*} - z _ {0} \| ^ {2} \quad \left(\frac {t}{t + \frac {1}{2}} < 1\right) \\ \end{array}
792
+ $$
793
+
794
+ where in the second last inequality we apply $\langle a,b\rangle \geq -\frac{\alpha}{4}\| a\| ^2 -\frac{1}{\alpha}\| b\| ^2$ with $a = \sqrt{t} (\eta F(z_t) + \eta c_t)$ , $b = \sqrt{t} (z^{*} - z_{0})$ , and $\alpha = t + \frac{1}{2}$ .
795
+
796
+ Proof of Theorem 2. It is equivalent to prove that for every $T \geq 1$ , we have
797
+
798
+ $$
799
+ \left\| \eta F (z _ {T}) + \eta c _ {T} \right\| ^ {2} \leq \frac {6 H ^ {2}}{T ^ {2}}.
800
+ $$
801
+
802
+ From Lemma 3, we have
803
+
804
+ $$
805
+ \left\| \eta F (z _ {1}) + \eta c _ {1} \right\| ^ {2} \leq (1 + \eta L) ^ {2} \| z _ {1} - z _ {0} \| ^ {2} \leq H ^ {2}.
806
+ $$
807
+
808
+ So the theorem holds for $T = 1$ .
809
+
810
+ For any $T\geq 2$ , by Lemma 4 we have
811
+
812
+ $$
813
+ \begin{array}{l} \frac {T \left(T + \frac {1}{2}\right)}{4} \| \eta F (z _ {T}) + \eta c _ {T} \| ^ {2} \leq V _ {T} + \| z _ {0} - z ^ {*} \| ^ {2} \\ \leq V _ {1} + \left\| z _ {0} - z ^ {*} \right\| ^ {2} + \frac {1}{8} \sum_ {t = 2} ^ {T} \left\| \eta F (z _ {t}) + \eta c _ {t} \right\| ^ {2} \\ = H ^ {2} + \frac {1}{8} \sum_ {t = 2} ^ {T} \| \eta F (z _ {t}) + \eta c _ {t} \| ^ {2}. \\ \end{array}
814
+ $$
815
+
816
+ By subtracting $\frac{1}{8}\| \eta F(z_T) + \eta c_T\|^2$ from both sides of the above inequality, we get
817
+
818
+ $$
819
+ \frac {T ^ {2}}{4} \left\| \eta F (z _ {T}) + \eta c _ {T} \right\| ^ {2} \leq H ^ {2} + \frac {1}{8} \sum_ {t = 2} ^ {T - 1} \left\| \eta F (z _ {t}) + \eta c _ {t} \right\| ^ {2}
820
+ $$
821
+
822
+ which is in the form of Proposition 4 with $C_1 = H^2$ and $p = \frac{1}{9}$ . Thus we have for any $T \geq 2$
823
+
824
+ $$
825
+ \left\| \eta F (z _ {T}) + \eta c _ {T} \right\| ^ {2} \leq \frac {6 H ^ {2}}{T ^ {2}}.
826
+ $$
827
+
828
+ ![](images/3062b8b77d64815aefe178f752d3e65d610ea7b94a8dc0bf3a28f2120cf44d4d.jpg)
829
+
830
+ # D MISSING PROOFS IN SECTION 5
831
+
832
+ To prove Theorem 3, our analysis is based on a potential function argument and can be summarized in the following three steps. (1) We construct a potential function and show that it is non-increasing between two consecutive iterates; (2) We prove that the RG algorithm has a best-iterate convergence rate, i.e., for any $T \geq 1$ , there exists one iterate $t^* \in [T]$ such that our potential function at iterate $t^*$ is small; (3) We combine the above steps to show that the last iterate has the same convergence guarantee as the best iterate and derive the $O\left(\frac{1}{\sqrt{T}}\right)$ last-iterate convergence rate.
833
+
834
+ # D.1 NON-INCREASING POTENTIAL
835
+
836
+ Potential Function. We denote
837
+
838
+ $$
839
+ c _ {t + 1} := \frac {z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) - z _ {t + 1}}{\eta}, \forall t \geq 0 \tag {20}
840
+ $$
841
+
842
+ Note that according to the update rule of RG, $z_{t+1} = \Pi_{\mathcal{Z}}[z_t - \eta F(z_{t+\frac{1}{2}})]$ , so $c_{t+1} \in N_{\mathcal{Z}}(z_{t+1})$ .
843
+
844
+ The potential function we adopt is $P_{t}$ defined as
845
+
846
+ $$
847
+ P _ {t} := \left\| F \left(z _ {t}\right) + c _ {t} \right\| ^ {2} + \left\| F \left(z _ {t}\right) - F \left(z _ {t - \frac {1}{2}}\right) \right\| ^ {2}, \forall t \geq 1. \tag {21}
848
+ $$
849
+
850
+ Lemma 5. In the same setup of Theorem 3, $P_{t} \geq P_{t + 1}$ for any $t \geq 1$ .
851
+
852
+ Proof. The plan is to show that $P_{t} - P_{t + 1}$ plus a few non-positive terms is non-negative, which certifies that $P_{t} - P_{t + 1} \geq 0$ .
853
+
854
+ Three Non-Positive Terms. Since $F$ is monotone, we have
855
+
856
+ $$
857
+ (- 2) \cdot \left\langle \eta F \left(z _ {t + 1}\right) - \eta F \left(z _ {t}\right), z _ {t + 1} - z _ {t} \right\rangle \leq 0. \tag {22}
858
+ $$
859
+
860
+ Since $F$ is $L$ -Lipschitz and $0 < \eta < \frac{1}{(1 + \sqrt{2})L} < \frac{1}{2L}$ , we have
861
+
862
+ $$
863
+ (- 2) \cdot \left(\frac {1}{4} \cdot \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2} - \left\| \eta F \left(z _ {t + 1}\right) - \eta F \left(z _ {t + \frac {1}{2}}\right) \right\| ^ {2}\right) \leq 0. \tag {23}
864
+ $$
865
+
866
+ By definition, we have $c_{t+1} \in N_{\mathcal{Z}}(z_{t+1})$ and $c_t \in N_{\mathcal{Z}}(z_t)$ . Since the normal cone operator $N_{\mathcal{Z}}$ is maximally monotone, we have
867
+
868
+ $$
869
+ (- 2) \cdot \left\langle \eta c _ {t + 1} - \eta c _ {t}, z _ {t + 1} - z _ {t} \right\rangle \leq 0. \tag {24}
870
+ $$
871
+
872
+ Sum-of-Squares Identity. We use the following equivalent formations of $z_{t + \frac{1}{2}}$ and $z_{t + 1}$ .
873
+
874
+ $$
875
+ z _ {t + \frac {1}{2}} = 2 z _ {t} - z _ {t - 1} = z _ {t} - (z _ {t - 1} - z _ {t}) = z _ {t} - \eta F (z _ {t - \frac {1}{2}}) - \eta c _ {t},
876
+ $$
877
+
878
+ $$
879
+ z _ {t + 1} = \Pi_ {\mathcal {Z}} \left[ z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) \right] = z _ {t} - \eta F \left(z _ {t + \frac {1}{2}}\right) - \eta c _ {t + 1}.
880
+ $$
881
+
882
+ The following identity holds according to Proposition 3. To see this, we replace $x_{k}$ with $z_{t - 1 + \frac{k}{2}}$ ; replace $y_{k}$ with $\eta F(z_{t - 1 + \frac{k}{2}})$ ; replace $u_{2}$ with $\eta c_{t}$ ; replace $u_{4}$ with $\eta c_{t + 1}$ ; also note that $x_{3} = x_{2} - y_{1} - u_{2}$ and $x_{4} = x_{2} - y_{3} - u_{4}$ hold due to the above equivalent formations of $z_{t + \frac{1}{2}}$ and $z_{t + 1}$ .
883
+
884
+ $\eta^2 \cdot (P_t - P_{t+1}) + \text{LHS of Inequality}(22) + \text{LHS of Inequality}(23) + \text{LHS of Inequality}(24)$
885
+
886
+ $$
887
+ = \left\| \frac {z _ {t + \frac {1}{2}} - z _ {t + 1}}{2} + \eta F (z _ {t - \frac {1}{2}}) - \eta F (z _ {t}) \right\| ^ {2} + \left\| \frac {z _ {t + \frac {1}{2}} + z _ {t + 1}}{2} - z _ {t} + \eta F (z _ {t}) + \eta c _ {t} \right\| ^ {2}.
888
+ $$
889
+
890
+ The right-hand side of the above equality is clearly $\geq 0$ , thus we conclude $P_{t} - P_{t + 1}\geq$ 0.
891
+
892
+ # D.2 BEST-ITERATE CONVERGENCE
893
+
894
+ In this section, we show that for any $T \geq 1$ , there exists some iterate $t^*$ such that $P_{t^*} = O\left(\frac{1}{T}\right)$ , which is implied by $\sum_{t=1}^{T} P_t = O(1)$ . To prove this, we first show $\sum_{t=1}^{T} \|z_{t+\frac{1}{2}} - z_t\|^2 = \sum_{t=1}^{T} \|z_t - z_{t-1}\|^2 = O(1)$ and then relate $\sum_{t=1}^{T} P_t$ to these two quantities.
895
+
896
+ Lemma 6. In the same setup of Theorem 3, for any $T \geq 1$ , we have
897
+
898
+ $$
899
+ \sum_ {t = 1} ^ {T} \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} = \sum_ {t = 1} ^ {T} \| z _ {t} - z _ {t - 1} \| ^ {2} \leq \frac {H ^ {2}}{1 - (1 + \sqrt {2}) \eta L}.
900
+ $$
901
+
902
+ Proof. First note that by the update rule of RG, we have $z_{t + \frac{1}{2}} = 2z_t - z_{t - 1}$ thus $z_{t + \frac{1}{2}} - z_t = z_t - z_{t - 1}$ . Therefore, it suffices to only prove the inequality for $\sum_{t = 1}^{T}\| z_{t + \frac{1}{2}} - z_t\| ^2$ .
903
+
904
+ From the proof of (Hsieh et al., 2019, Lemma 2), for any $t \geq 1$ and $p \in \mathcal{Z}$ , we have
905
+
906
+ $$
907
+ \begin{array}{l} \left(1 - (1 + \sqrt {2}) \eta L\right) \cdot \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} \leq \| z _ {t} - p \| ^ {2} - \| z _ {t + 1} - p \| ^ {2} - 2 \eta \left\langle F \left(z _ {t + \frac {1}{2}}\right), z _ {t + \frac {1}{2}} - p \right\rangle \\ + \eta L \left(\left\| z _ {t} - z _ {t - \frac {1}{2}} \right\| ^ {2} - \left\| z _ {t + 1} - z _ {t + \frac {1}{2}} \right\| ^ {2}\right). \tag {25} \\ \end{array}
908
+ $$
909
+
910
+ We set $p = z^*$ to be a solution of the variational inequality (VI) problem in the above inequality. Note that
911
+
912
+ $$
913
+ \begin{array}{l} - 2 \eta \left\langle F \left(z _ {t + \frac {1}{2}}\right), z _ {t + \frac {1}{2}} - z ^ {*} \right\rangle = - 2 \eta \left\langle F \left(z _ {t + \frac {1}{2}}\right) - F \left(z ^ {*}\right), z _ {t + \frac {1}{2}} - z ^ {*} \right\rangle - 2 \eta \left\langle F \left(z ^ {*}\right), z _ {t + \frac {1}{2}} - z ^ {*} \right\rangle \\ \leq - 2 \eta \left\langle F \left(z ^ {*}\right), z _ {t + \frac {1}{2}} - z ^ {*} \right\rangle \quad (F \text { is monotone}) \\ = 2 \eta \left\langle F \left(z ^ {*}\right), z _ {t - 1} - z ^ {*} \right\rangle - 4 \eta \left\langle F \left(z ^ {*}\right), z _ {t} - z ^ {*} \right\rangle \tag {26} \\ \end{array}
914
+ $$
915
+
916
+ where the last equality holds since $z_{t + \frac{1}{2}} = 2z_t - z_{t - 1}$ . Also note that $\langle F(z^{*}),z_{t} - z^{*}\rangle \geq 0$ for all $t\geq 0$ since $z_{t}\in \mathcal{Z}$ and $z^{*}$ is a solution to (VI). Combining Inequality (25) and Inequality (26), telescoping the terms for $t = 1,2,\dots ,T$ , and dividing both sides by $1 - (1 + \sqrt{2})\eta L > 0$ , we get
917
+
918
+ $$
919
+ \sum_ {t = 1} ^ {T} \left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} \leq \frac {\| z _ {1} - z ^ {*} \| ^ {2} + \| z _ {1} - z _ {\frac {1}{2}} \| ^ {2} + 2 \eta \langle F (z ^ {*}) , z _ {0} - z ^ {*} \rangle}{1 - (1 + \sqrt {2}) \eta L}.
920
+ $$
921
+
922
+ To get a cleaner constant that only relies on the starting point $z_0 = z_{\frac{1}{2}}$ , we further simplify the three terms on the right-hand side. Note that since $\eta < \frac{1}{2L}$ and $z_1 = \Pi_{\mathcal{Z}}[z_0 - \eta F(z_0)]$ , we have
923
+
924
+ $$
925
+ \left\| z _ {1} - z _ {\frac {1}{2}} \right\| ^ {2} = \| z _ {1} - z _ {0} \| ^ {2} \leq \eta^ {2} \| F (z _ {0}) \| ^ {2} \leq \frac {4}{L ^ {2}} \| F (z _ {0}) \| ^ {2}.
926
+ $$
927
+
928
+ Thus we have
929
+
930
+ $$
931
+ \| z _ {1} - z ^ {*} \| ^ {2} \leq 2 \| z _ {1} - z _ {0} \| ^ {2} + 2 \| z _ {0} - z ^ {*} \| ^ {2} \leq \frac {8}{L ^ {2}} \| F (z _ {0}) \| ^ {2} + 2 \| z _ {0} - z ^ {*} \| ^ {2}.
932
+ $$
933
+
934
+ Moreover,
935
+
936
+ $$
937
+ \begin{array}{l} 2 \eta \langle F (z ^ {*}), z _ {0} - z ^ {*} \rangle \leq 2 \eta \| F (z ^ {*}) \| \| z _ {0} - z ^ {*} \| \\ \leq 2 \eta \left(\| F (z ^ {*}) - F (z _ {0}) \| + \| F (z _ {0}) \|\right) \| z _ {0} - z ^ {*} \| \quad (\| A \| \leq \| A - B \| + \| B \|) \\ \leq 2 \eta L \| z _ {0} - z ^ {*} \| ^ {2} + 2 \eta \| F (z _ {0}) \| \| z _ {0} - z ^ {*} \| \\ \leq \| z _ {0} - z ^ {*} \| ^ {2} + \frac {1}{L} \| F (z _ {0}) \| \| z _ {0} - z ^ {*} \| \quad (\eta < \frac {1}{2 L}) \\ \leq 2 \| z _ {0} - z ^ {*} \| ^ {2} + \frac {1}{L ^ {2}} \| F (z _ {0}) \| ^ {2}. \quad (2 a b \leq a ^ {2} + b ^ {2}) \\ \end{array}
938
+ $$
939
+
940
+ Thus
941
+
942
+ $$
943
+ \left\| z _ {1} - z ^ {*} \right\| ^ {2} + \left\| z _ {1} - z _ {\frac {1}{2}} \right\| ^ {2} + 2 \eta \langle F (z ^ {*}), z _ {0} - z ^ {*} \rangle \leq \frac {13}{L ^ {2}} \| F (z _ {0}) \| ^ {2} + 4 \| z _ {0} - z ^ {*} \| ^ {2} = H ^ {2}.
944
+ $$
945
+
946
+ This completes the proof.
947
+
948
+ Lemma 7. In the same setup of Theorem 3, for any $T \geq 1$ , we have
949
+
950
+ $$
951
+ \sum_ {t = 1} ^ {T} P _ {t} \leq \lambda^ {2} H ^ {2} L ^ {2}.
952
+ $$
953
+
954
+ Proof. We first show an upper bound for $P_{t}$:
955
+
956
+ $$
957
+ \begin{array}{l} P _ {t} = \left\| F (z _ {t}) + c _ {t} \right\| ^ {2} + \left\| F (z _ {t}) - F (z _ {t - \frac {1}{2}}) \right\| ^ {2} \\ = \left\| F \left(z _ {t}\right) - F \left(z _ {t - \frac {1}{2}}\right) + \frac {z _ {t} - z _ {t - 1}}{\eta} \right\| ^ {2} + \left\| F \left(z _ {t}\right) - F \left(z _ {t - \frac {1}{2}}\right) \right\| ^ {2} \quad (\text{definition of } c _ {t} \text{ in (20)}) \\ \leq 3 \left\| F (z _ {t}) - F (z _ {t - \frac {1}{2}}) \right\| ^ {2} + \frac {2}{\eta^ {2}} \| z _ {t} - z _ {t - 1} \| ^ {2} \quad (\| A + B \| ^ {2} \leq 2 \| A \| ^ {2} + 2 \| B \| ^ {2}) \\ \leq 3 L ^ {2} \left\| z _ {t} - z _ {t - \frac {1}{2}} \right\| ^ {2} + \frac {2}{\eta^ {2}} \left\| z _ {t} - z _ {t - 1} \right\| ^ {2} \quad (F \text{ is } L\text{-Lipschitz}) \\ = 3 L ^ {2} \left\| z _ {t} - z _ {t - 1} + z _ {t - 1} - z _ {t - \frac {1}{2}} \right\| ^ {2} + \frac {2}{\eta^ {2}} \| z _ {t} - z _ {t - 1} \| ^ {2} \\ \leq 6 L ^ {2} \left\| z _ {t - \frac {1}{2}} - z _ {t - 1} \right\| ^ {2} + \left(\frac {2}{\eta^ {2}} + 6 L ^ {2}\right) \| z _ {t} - z _ {t - 1} \| ^ {2} \quad (\| A + B \| ^ {2} \leq 2 \| A \| ^ {2} + 2 \| B \| ^ {2}) \\ \leq \frac {2 + 6 \eta^ {2} L ^ {2}}{\eta^ {2}} \bigg (\left\| z _ {t - \frac {1}{2}} - z _ {t - 1} \right\| ^ {2} + \| z _ {t} - z _ {t - 1} \| ^ {2} \bigg). \\ \end{array}
958
+ $$
959
+
960
+ Summing the above inequality over $t = 1,2,\dots,T$, we get
961
+
962
+ $$
963
+ \begin{array}{l} \sum_ {t = 1} ^ {T} P _ {t} \leq \frac {2 + 6 \eta^ {2} L ^ {2}}{\eta^ {2}} \sum_ {t = 1} ^ {T} \left(\left\| z _ {t - \frac {1}{2}} - z _ {t - 1} \right\| ^ {2} + \| z _ {t} - z _ {t - 1} \| ^ {2}\right) \\ = \frac {2 + 6 \eta^ {2} L ^ {2}}{\eta^ {2}} \left(\| z _ {1} - z _ {0} \| ^ {2} + \sum_ {t = 1} ^ {T - 1} \left(\left\| z _ {t + \frac {1}{2}} - z _ {t} \right\| ^ {2} + \| z _ {t + 1} - z _ {t} \| ^ {2}\right)\right) \\ \leq \frac {2 + 6 \eta^ {2} L ^ {2}}{\eta^ {2}} \left(\| z _ {1} - z _ {0} \| ^ {2} + \frac {2 H ^ {2}}{1 - (1 + \sqrt {2}) \eta L}\right) \\ \leq \frac {6 (1 + 3 \eta^ {2} L ^ {2}) H ^ {2}}{\eta^ {2} (1 - (1 + \sqrt {2}) \eta L)}. \\ \end{array}
964
+ $$
965
+
966
+ The second-to-last inequality holds by Lemma 6. The last inequality holds since $\| z_1 - z_0 \|^2 \leq \frac{4}{L^2} \| F(z_0) \|^2 \leq H^2$ . Recall that $\lambda = \sqrt{\frac{6(1 + 3\eta^2L^2)}{\eta^2L^2(1 - (1 + \sqrt{2})\eta L)}}$ . This completes the proof.
967
+
968
+ # D.3 PROOF OF THEOREM 3
969
+
970
+ Fix any $T \geq 1$ . From Lemma 5, we know that the potential function $P_{t}$ is non-increasing for all $t \geq 1$ . Lemma 7 guarantees that the sum of potential functions $\sum_{t=1}^{T} P_{t}$ is upper bounded by $\lambda^2 H^2 L^2$ , where $\lambda^2 = \frac{6(1 + 3\eta^2L^2)}{\eta^2L^2(1 - (1 + \sqrt{2})\eta L)}$ . Combining the above, we can conclude that the potential function at the last iterate $P_{T}$ is upper bounded by $\frac{\lambda^2H^2L^2}{T}$ . Since $P_{T} = \| F(z_{T}) + c_{T}\|^{2} + \| F(z_{T}) - F(z_{T - \frac{1}{2}})\|^{2}$ , we obtain the last-iterate convergence rate $r_{F,Z}^{tan}(z_{T})^{2} \leq \| F(z_{T}) + c_{T}\|^{2} \leq \frac{\lambda^2H^2L^2}{T}$ .
971
+
972
+ The convergence rate on $\| F(z_{T}) + c_{T}\|^{2}$ implies a convergence rate on the gap function $\mathrm{GAP}_{Z,F,D}(z_T)$ by Lemma 1:
973
+
974
+ $$
975
+ \operatorname {G A P} _ {\mathcal {Z}, F, D} \left(z _ {T}\right) \leq D \cdot \left\| F \left(z _ {T}\right) + c _ {T} \right\| \leq \frac {\lambda D H L}{\sqrt {T}}.
976
+ $$
977
+
978
+ # E NUMERICAL ILLUSTRATION
979
+
980
+ In this section, we conduct numerical experiments to illustrate and compare the performance of several algorithms: Reflected Gradient (RG), Extra Gradient (EG), Accelerated Reflected Gradient (ARG), and Fast Extra Gradient (FEG) (Lee & Kim, 2021a). Among them, ARG and FEG are accelerated algorithms while RG and EG are normal algorithms.
981
+
982
+ Test Problem We use a classical example (Problem 1 in (Malitsky, 2015)) which is unconstrained and the operator $F(z) = Az$ where $A$ is an $n \times n$ matrix that
983
+
984
+ $$
985
+ A (i, j) = \left\{ \begin{array}{l l} 1, & j = n + 1 - i > i \\ - 1, & j = n + 1 - i < i \\ 0, & \text{otherwise} \end{array} \right.
986
+ $$
987
+
988
+ Note that $F$ is 1-Lipschitz and its solution is the zero vector $\mathbf{0}$ when $n$ is even.
989
+
990
+ Test Details We run experiments using Python 3.9 on Jupyter Notebook, on MacBook Air (M1, 2020) running macOS 12.5.1. Time of execution is measured using the time package in Python. For all tests, we take the initial point to be the all-one vector $z_0 = (1, \dots, 1)$ . We denote $\eta$ to be the step size and the termination criterion is the residual (operator norm) $||F(z_t)|| \leq \varepsilon$ . The code can be found in the Supplementary Material.
991
+
992
+ Test Results The results for EG and RG are shown in Figure 1. With step size $\eta = 0.4$ , EG is slower than RG. This is due to the fact that EG makes two gradient calls per iteration. Even with the optimized step size $\eta = 0.7$ which gives the best performance, EG is still slower than RG for this problem. Our results are consistent with numerical results on Mathematica by Malitsky (2015).
993
+
994
+ The results for FEG and ARG are shown in Figure 2. With step size $\eta = 0.5$ , FEG is slower than ARG. With the optimized step size $\eta = 1$ , FEG is a little faster than ARG. So for this problem, the performance of FEG and ARG are comparable. We also remark that for this particular problem, both ARG and FEG are slower than EG or RG. This does not contradict with our theoretical results on worst-case convergence rate. Simple algorithms like RG and EG can be faster than accelerated methods like ARG and FEG for particular problems. This also illustrates the importance of understanding simple algorithms like RG.
995
+
996
+ ![](images/dd90d777d154194284c2fdb27e69710bd2842fbcd7432294c8c55320bb754894.jpg)
997
+ Figure 1: Results for EG and RG when $\varepsilon = 0.001$ . The red line and blue line are EG and RG with step size $\eta = 0.4$ . The yellow line is EG with (approximately) optimized step size $\eta = 0.7$ . We remark that RG would diverge with $\eta = 0.7$ .
998
+
999
+ # F AUXILIARY PROPOSITIONS
1000
+
1001
+ Proposition 3 (Two Identities). Let $(x_{k})_{k\in [4]},(y_{k})_{k\in [4]},x_{0},u_{2}$ and $u_{4}$ be arbitrary vectors in $\mathbb{R}^n$ . Let $k\geq 1$ and $q\in (0,1)$ be two real numbers. If the following two equations hold:
1002
+
1003
+ $$
1004
+ \begin{array}{l} x _ {3} = x _ {2} - y _ {1} - u _ {2} \\ x _ {4} = x _ {2} - y _ {3} - u _ {4} \\ \end{array}
1005
+ $$
1006
+
1007
+ ![](images/8bd89d81caa19701fe3ab1dace1e4222064a22ba84846cf4f4bf4d3772a6de67.jpg)
1008
+ Figure 2: Results for FEG and ARG when $\varepsilon = 0.01$ . The red line and blue line are FEG and ARG with step size $\eta = 0.5$ . The yellow line is FEG with (approximately) optimized step size $\eta = 1$ .
1009
+
1010
+ then the following identity holds:
1011
+
1012
+ $$
1013
+ \begin{array}{l} \left\| y _ {2} + u _ {2} \right\| ^ {2} + \left\| y _ {2} - y _ {1} \right\| ^ {2} - \left\| y _ {4} + u _ {4} \right\| ^ {2} - \left\| y _ {4} - y _ {3} \right\| ^ {2} \\ - 2 \cdot \left\langle y _ {4} - y _ {2}, x _ {4} - x _ {2} \right\rangle \\ - 2 \cdot \left(\frac {1}{4} \cdot \| x _ {4} - x _ {3} \| ^ {2} - \| y _ {4} - y _ {3} \| ^ {2}\right) \\ - 2 \cdot \left\langle u _ {4} - u _ {2}, x _ {4} - x _ {2} \right\rangle \\ = \left\| \frac {x _ {3} - x _ {4}}{2} + y _ {1} - y _ {2} \right\| ^ {2} + \left\| \frac {x _ {3} + x _ {4}}{2} - x _ {2} + y _ {2} + u _ {2} \right\| ^ {2} \\ \end{array}
1014
+ $$
1015
+
1016
+ If the following two equations hold:
1017
+
1018
+ $$
1019
+ \begin{array}{l} x _ {3} = x _ {2} - y _ {1} - u _ {2} + \frac {1}{k + 1} (x _ {0} - x _ {2}) \\ x _ {4} = x _ {2} - y _ {3} - u _ {4} + \frac {1}{k + 1} (x _ {0} - x _ {2}) \\ \end{array}
1020
+ $$
1021
+
1022
+ then the following identity holds:
1023
+
1024
+ $$
1025
+ \begin{array}{l} \frac {k (k + 1)}{2} \left(\left\| y _ {2} + u _ {2} \right\| ^ {2} + \left\| y _ {2} - y _ {1} \right\| ^ {2}\right) + k \langle y _ {2} + u _ {2}, x _ {2} - x _ {0} \rangle \\ - \frac {(k + 1) (k + 2)}{2} \left(\left\| y _ {4} + u _ {4} \right\| ^ {2} + \left\| y _ {4} - y _ {3} \right\| ^ {2}\right) - (k + 1) \langle y _ {4} + u _ {4}, x _ {4} - x _ {0} \rangle \\ - k (k + 1) \cdot \left\langle y _ {4} + u _ {4} - y _ {2} - u _ {2}, x _ {4} - x _ {2} \right\rangle \\ - \frac {k (k + 1)}{4 q} \cdot \left( q \cdot \| x _ {4} - x _ {3} \| ^ {2} - \| y _ {4} - y _ {3} \| ^ {2} \right) \\ = \frac {k (k + 1)}{4} \cdot \left\| u _ {4} - u _ {2} + y _ {1} - 2 y _ {2} + y _ {3} \right\| ^ {2} \\ + \left(\frac {(1 - 4 q) k - 4 q}{4 q} (k + 1)\right) \cdot \| y _ {3} - y _ {4} \| ^ {2} \\ + (k + 1) \cdot \langle y _ {3} - y _ {4}, y _ {4} + u _ {4} \rangle \\ \end{array}
1026
+ $$
1027
+
1028
+ Proof. We verify the two identities by MATLAB. The code is available at
1029
+
1030
+ https://github.com/weiqiangzheng1999/Single-Call.
1031
+
1032
+ ![](images/53e324142b94aeab4031b1bd88aa4f19127333c84c863d6b1e5ebce0c173d94f.jpg)
1033
+
1034
+ Proposition 4 ((Cai et al., 2022a)). Let $\{a_{k} \in \mathbb{R}^{+}\}_{k \geq 2}$ be a sequence of real numbers. Let $C_{1} \geq 0$ and $p \in (0, \frac{1}{3})$ be two real numbers. If the following condition holds for every $k \geq 2$ ,
1035
+
1036
+ $$
1037
+ \frac {k ^ {2}}{4} \cdot a _ {k} \leq C _ {1} + \frac {p}{1 - p} \cdot \sum_ {t = 2} ^ {k - 1} a _ {t}, \tag {27}
1038
+ $$
1039
+
1040
+ then for each $k \geq 2$ we have
1041
+
1042
+ $$
1043
+ a _ {k} \leq \frac {4 \cdot C _ {1}}{1 - 3 p} \cdot \frac {1}{k ^ {2}}. \tag {28}
1044
+ $$
1045
+
1046
+ Proof. We prove the statement by induction.
1047
+
1048
+ Base Case: $k = 2$ . From Inequality (27), we have
1049
+
1050
+ $$
1051
+ \frac {2 ^ {2}}{4} \cdot a _ {2} \leq C _ {1} \Rightarrow a _ {2} \leq C _ {1} \leq \frac {4 \cdot C _ {1}}{1 - 3 p} \cdot \frac {1}{2 ^ {2}}.
1052
+ $$
1053
+
1054
+ Thus, Inequality (28) holds for $k = 2$ .
1055
+
1056
+ Inductive Step: for any $k \geq 3$ . Fix some $k \geq 3$ and assume that Inequality (28) holds for all $2 \leq t \leq k - 1$ . We slightly abuse notation and treat the summation in the form $\sum_{t=3}^{2}$ as 0. By Inequality (27), we have
1057
+
1058
+ $$
1059
+ \begin{array}{l} \frac {k ^ {2}}{4} \cdot a _ {k} \leq C _ {1} + \frac {p}{1 - p} \cdot \sum_ {t = 2} ^ {k - 1} a _ {t} \\ \leq \frac {C _ {1}}{1 - p} + \frac {p}{1 - p} \cdot \sum_ {t = 3} ^ {k - 1} a _ {t} \quad (a _ {2} \leq C _ {1}) \\ \leq \frac {C _ {1}}{1 - p} + \frac {4 p \cdot C _ {1}}{(1 - p) (1 - 3 p)} \cdot \sum_ {t = 3} ^ {k - 1} \frac {1}{t ^ {2}} \quad (\text{induction hypothesis, Inequality (28)}) \\ \leq \frac {C _ {1}}{1 - p} + \frac {2 p \cdot C _ {1}}{(1 - p) (1 - 3 p)} \quad \left(\sum_ {t = 3} ^ {\infty} \frac {1}{t ^ {2}} = \frac {\pi^ {2}}{6} - \frac {5}{4} \leq \frac {1}{2}\right) \\ = \frac {C _ {1}}{1 - 3 p}. \\ \end{array}
1060
+ $$
1061
+
1062
+ This completes the induction step. Therefore, for all $k \geq 2$ , we have $a_k \leq \frac{4 \cdot C_1}{1 - 3p} \cdot \frac{1}{k^2}$ .
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04718b213da6d14c0b7f7cec2acaddefae011dce38a0e4a6d19fcbce55bf9dc8
3
+ size 1285103
acceleratedsinglecallmethodsforconstrainedminmaxoptimization/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7004b9d6d2ad5d0c4f6fca4a6c900f538d730402890430e8f2ec792919b3726f
3
+ size 1194866
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:80ff9d4de38ded7e733542ce7d0dceebde5a379342038f76f6b5558c3ad99ccc
3
+ size 75241
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91068d76274aca8af703e572e585042843cb981c89882368fddd33b3df34aaae
3
+ size 90579
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/8b2761bf-1959-4c5c-997a-497b6f8b1ecb_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:691df45b58e4766399f676505b61f40ef05ddf7bb4c6ad7855de8d04cb60b095
3
+ size 12923828
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/full.md ADDED
@@ -0,0 +1,335 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACCELERATING GUIDED DIFFUSION SAMPLING WITH SPLITTING NUMERICAL METHODS
2
+
3
+ Suttisak Wizadwongsa, Supasorn Suwajanakorn
4
+
5
+ VISTEC, Thailand
6
+
7
+ {suttisak.w_s19, supasorn.s}@vistec.ac.th
8
+
9
+ # ABSTRACT
10
+
11
+ Guided diffusion is a technique for conditioning the output of a diffusion model at sampling time without retraining the network for each specific task. However, one drawback of diffusion models, whether they are guided or unguided, is their slow sampling process. Recent techniques can accelerate unguided sampling by applying high-order numerical methods to the sampling process when viewed as differential equations. On the contrary, we discover that the same techniques do not work for guided sampling, and little has been explored about its acceleration. This paper explores the culprit of this problem and provides a solution based on operator splitting methods, motivated by our key finding that classical high-order numerical methods are unsuitable for the conditional function. Our proposed method can re-utilize the high-order methods for guided sampling and can generate images with the same quality as a 250-step DDIM baseline using $32 - 58\%$ less sampling time on ImageNet256. We also demonstrate usage on a wide variety of conditional generation tasks, such as text-to-image generation, colorization, inpainting, and super-resolution.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ A family of generative models known as diffusion models has recently gained a lot of attention with state-of-the-art image generation quality (Dhariwal & Nichol, 2021). Guided diffusion is an approach for controlling the output of a trained diffusion model for conditional generation tasks without retraining its network. By engineering a task-specific conditional function and modifying only the sampling procedure, guided diffusion models can be used in a variety of applications, such as class-conditional image generation (Dhariwal & Nichol, 2021; Kawar et al., 2022), text-to-image generation (Nichol et al., 2022), image-to-image translation (Zhao et al., 2022), inpainting (Chung et al., 2022a), colorization (Song et al., 2020b), image composition (Sasaki et al., 2021), adversarial purification (Wang et al., 2022; Wu et al., 2022) and super-resolution (Choi et al., 2021).
16
+
17
+ One common drawback of both guided and regular "unguided" diffusion models is their slow sampling processes, usually requiring hundreds of iterations to produce a single image. Recent speed-up attempts include improving the noise schedule (Nichol & Dhariwal, 2021; Watson et al., 2021), redefining the diffusion process to be non-Markovian, thereby allowing a deterministic sampling process Song et al. (2020a), network distillation that teaches a student model to simulate multiple sampling steps of a teacher model Salimans & Ho (2022); Luhman & Luhman (2021), among others. Song et al. (2020a) show how each sampling step can be expressed as a first-order numerical step of an ordinary differential equation (ODE). Similarly, Song et al. (2020b) express the sampling of a score-based model as solving a stochastic differential equation (SDE). By regarding the sampling process as an ODE/SDE, many high-order numerical methods have been suggested, such as Liu et al. (2022), Zhang & Chen (2022), and Zhang et al. (2022) with impressive results on unguided diffusion models. However, when applied to guided diffusion models, these methods produce surprisingly poor results (see Figure 1)—given a few number of steps, those high-order numerical methods actually perform worse than low-order methods.
18
+
19
+ Guided sampling differs from the unguided one by the addition of the gradients of the conditional function to its sampling equation. The observed performance decline thus suggests that classical high-order methods may not be suitable for the conditional function and, consequently, the guided
20
+
21
+ ![](images/1f4009ed90fa3ceaffa8ba7070c653524c1e61574c3715f5e742f31fdfebdf2b.jpg)
22
+ Figure 1: Generated samples of a classifier-guided diffusion model trained on ImageNet256 using 8-256 sampling steps from different sampling methods. Our technique, STSP4, produces high-quality results in a fewer number of steps.
23
+
24
+ sampling equation as a whole. Our paper tests this hypothesis and presents an approach to accelerating guided diffusion sampling. The key idea is to use an operator splitting method to split the less well-behaved conditional function term from the standard diffusion term and solve them separately. This approach not only allows re-utilizing the successful high-order methods on the diffusion term but also provides us with options to combine different specialized methods for each term to maximize performance. Note that splitting methods have also been explored by Dockhorn et al. (2022) to solve unguided diffusion SDEs, but our work focuses on accelerating guided diffusion ODEs.
25
+
26
+ Our design process includes comparing different splitting methods and numerical methods for each split term. When tested on ImageNet, our approach achieves the same level of image quality as a DDIM baseline while reducing the sampling time by approximately $32 - 58\%$ . Compared with other sampling methods using the same sampling time, our approach provides better image quality as measured by LPIPS, FID, and Perception/Recall. With only minimal modifications to the sampling equation, we also show successful acceleration on various conditional generation tasks.
27
+
28
+ # 2 BACKGROUND
29
+
30
+ This section provides a high-level summary of the theoretical foundation of diffusion models as well as numerical methods that have been used for diffusion models. Here we briefly explain a few that contribute to our method.
31
+
32
+ # 2.1 DIFFUSION MODELS
33
+
34
+ Assuming that $x_0$ is a random variable from the data distribution we wish to reproduce, diffusion models define a sequence of Gaussian noise degradation of $x_0$ as random variables $x_1, x_2, \ldots, x_T$ , where $x_t \sim \mathcal{N}(\sqrt{1 - \beta_t} x_{t-1}, \beta_t \mathbf{I})$ and $\beta_t \in [0, 1]$ are parameters that control the noise levels. With a property of Gaussian distribution, we can express $x_t$ directly as a function of $x_0$ and noise $\epsilon \sim \mathcal{N}(0, \mathbf{I})$ by $x_t = \sqrt{\bar{\alpha}_t} x_0 + \sqrt{1 - \bar{\alpha}_t} \epsilon$ , where $\bar{\alpha}_t = \prod_{i=1}^t (1 - \beta_i)$ . By picking a sufficiently large $T$ (e.g., 1,000) and an appropriate set of $\beta_t$ , we can assume $x_T$ is a standard Gaussian distribution. The main idea of diffusion model generation is to sample a Gaussian noise $x_T$ and use it to reversely sample $x_{T-1}, x_{T-2}, \ldots$ until we obtain $x_0$ , which belongs to our data distribution.
35
+
36
+ Ho et al. (2020) propose Denoising Diffusion Probabilistic Model (DDPM) and explain how to employ a neural network $\epsilon_{\theta}(x_t,t)$ to predict the noise $\epsilon$ that is used to compute $x_{t}$ . To train the network, we sample a training image $x_0$ , a timestep $t$ , and noise $\epsilon$ to compute $x_{t}$ using the above relationship. Then, we optimize our network $\epsilon_{\theta}$ to minimize the difference between the predicted and real noise, i.e., $\| \epsilon -\epsilon_{\theta}(x_t,t)\| ^2$ .
37
+
38
+ Song et al. (2020a) introduce Denoising Diffusion Implicit Model (DDIM), which uses the network $\epsilon_{\theta}$ to deterministically obtain $x_{t - 1}$ given $x_{t}$ . The DDIM generative process can be written as
39
+
40
+ $$
41
+ x _ {t - 1} = \sqrt {\frac {\bar {\alpha} _ {t - 1}}{\bar {\alpha} _ {t}}} \left(x _ {t} - \sqrt {1 - \bar {\alpha} _ {t}} \epsilon_ {\theta} \left(x _ {t}, t\right)\right) + \sqrt {1 - \bar {\alpha} _ {t - 1}} \epsilon_ {\theta} \left(x _ {t}, t\right). \tag {1}
42
+ $$
43
+
44
+ This formulation could be used to skip many sampling steps and boost sampling speed. To turn this into an ODE, we rewrite Equation 1 as:
45
+
46
+ $$
47
+ \frac {x _ {t - \Delta t}}{\sqrt {\bar {\alpha} _ {t - \Delta t}}} = \frac {x _ {t}}{\sqrt {\bar {\alpha} _ {t}}} + \left(\sqrt {\frac {1 - \bar {\alpha} _ {t - \Delta t}}{\bar {\alpha} _ {t - \Delta t}}} - \sqrt {\frac {1 - \bar {\alpha} _ {t}}{\bar {\alpha} _ {t}}}\right) \epsilon_ {\theta} (x _ {t}, t), \tag {2}
48
+ $$
49
+
50
+ which is now equivalent to a numerical step in solving an ODE. To derive the corresponding ODE, we can re-parameterize $\sigma_t = \sqrt{1 - \bar{\alpha}_t} / \sqrt{\bar{\alpha}_t}$ , $\bar{x}(t) = x_t / \sqrt{\bar{\alpha}_t}$ and $\bar{\epsilon}_{\sigma}(\bar{x}) = \epsilon_{\theta}(x_t, t)$ , yielding $\bar{x}(t - \Delta t) - \bar{x}(t) = (\sigma_{t - \Delta t} - \sigma_t)\bar{\epsilon}_{\sigma}(\bar{x})$ . By letting $(\sigma_{t - \Delta t} - \sigma_t) \to 0$ , the ODE becomes:
51
+
52
+ $$
53
+ \frac {d \bar {x}}{d \sigma} = \bar {\epsilon} _ {\sigma} (\bar {x}). \tag {3}
54
+ $$
55
+
56
+ Note that this change of variables is equivalent to an exponential integrator technique described in both Zhang & Chen (2022) and Lu et al. (2022). Since $x_{t}$ and $\bar{x}(t)$ have the same value at $t = 0$ , our work can focus on solving $\bar{x}(t)$ rather than $x_{t}$ . Many numerical methods can be applied to the ODE Equation 3 to accelerate diffusion sampling. We next discuss some of them that are relevant.
57
+
58
+ # 2.2 NUMERICAL METHODS
59
+
60
+ Euler's Method is the most basic numerical method. A forward Euler step is given by $\bar{x}_{n + 1} = \bar{x}_n + \Delta \sigma \bar{\epsilon}_\sigma (\bar{x}_n)$ . When the forward Euler step is applied to the ODE Equation 3, we obtain the DDIM formulation (Song et al., 2020a).
61
+
62
+ Heun's Method, also known as the trapezoid rule or improved Euler, is given by: $\bar{x}_{n + 1} = \bar{x}_n + \frac{\Delta\sigma}{2} (e_1 + e_2)$ , where $e_1 = \bar{\epsilon}_{\sigma}(\bar{x}_n)$ and $e_2 = \bar{\epsilon}_{\sigma}(\bar{x}_n + \Delta \sigma e_1)$ . This method splits Euler's method into two steps to improve accuracy. Many papers have used this method on diffusion models, including Algorithm 1 in Karras et al. (2022) and DPM-Solver-2 in Lu et al. (2022). This method is also the simplest case of Predictor-Corrector methods used in Song et al. (2020b).
63
+
64
+ Runge-Kutta Methods represent a class of numerical methods that integrate information from multiple hidden steps and provide high accuracy results. Heun's method also belongs to a family of $2^{\mathrm{nd}}$ -order Runge-Kutta methods (RK2). The most well-known variant is the $4^{\mathrm{th}}$ -order Runge-Kutta method (RK4), which is written as follows:
65
+
66
+ $$
67
+ e _ {1} = \bar {\epsilon} _ {\sigma} (\bar {x} _ {n}), \quad e _ {2} = \bar {\epsilon} _ {\sigma} \left(\bar {x} _ {n} + \frac {\Delta \sigma}{2} e _ {1}\right), \quad e _ {3} = \bar {\epsilon} _ {\sigma} \left(\bar {x} _ {n} + \frac {\Delta \sigma}{2} e _ {2}\right), \quad e _ {4} = \bar {\epsilon} _ {\sigma} \left(\bar {x} _ {n} + \Delta \sigma e _ {3}\right),
68
+ $$
69
+
70
+ $$
71
+ \bar {x} _ {n + 1} = \bar {x} _ {n} + \frac {\Delta \sigma}{6} \left(e _ {1} + 2 e _ {2} + 2 e _ {3} + e _ {4}\right). \tag {4}
72
+ $$
73
+
74
+ This method has been tested on diffusion models in Liu et al. (2022) and Salimans & Ho (2022), but it has not been used as the main proposed method in any paper.
75
+
76
+ Linear Multi-Step Method, similar to the Runge-Kutta methods, aims to combine information from several steps. However, rather than evaluating new hidden steps, this method uses the previous steps to estimate the new step. The $1^{\mathrm{st}}$ -order formulation is the same as Euler's method. The $2^{\mathrm{nd}}$ -order formulation is given by
77
+
78
+ $$
79
+ \bar {x} _ {n + 1} = \bar {x} _ {n} + \frac {\Delta \sigma}{2} \left(3 e _ {0} - e _ {1}\right), \tag {5}
80
+ $$
81
+
82
+ while the $4^{\mathrm{th}}$ -order formulation is given by
83
+
84
+ $$
85
+ \bar {x} _ {n + 1} = \bar {x} _ {n} + \frac {\Delta \sigma}{2 4} \left(5 5 e _ {0} - 5 9 e _ {1} + 3 7 e _ {2} - 9 e _ {3}\right), \tag {6}
86
+ $$
87
+
88
+ where $e_k = \bar{\epsilon}_{\sigma}(\bar{x}_{n - k})$ . These formulations are designed for a constant $\Delta \sigma$ in each step. However, our experiments and previous work that uses this method (e.g., Liu et al. (2022); Zhang & Chen
89
+
90
+ (2022)) still show good results when this assumption is not strictly satisfied, i.e., when $\Delta \sigma$ is not constant. We will refer to these formulations as PLMS (Pseudo Linear Multi-Step) for the rest of the paper, like in Liu et al. (2022). A similar linear multi-step method for non-constant $\Delta \sigma$ can also be derived using a technique used in Zhang & Chen (2022), which we detail in Appendix B. This non-constant version can improve upon PLMS slightly, but it is not as flexible because we have to re-derive the update rule every time the $\sigma$ schedule changes.
91
+
92
+ # 3 SPLITTING METHODS FOR GUIDED DIFFUSION MODELS
93
+
94
+ This section introduces our technique that uses splitting numerical methods to accelerate guided diffusion sampling. We first focus our investigation on classifier-guided diffusion models for class-conditional generation and later demonstrate how this technique can be used for other conditional generation tasks in Section 4.3. Like any guided diffusion models, classifier-guided models (Dhariwal & Nichol, 2021) share the same training objective with regular unguided models with no modifications to the training procedure; but the sampling process is guided by an additional gradient signal from an external classifier to generate class-specific output images. Specifically, the sampling process is given by
95
+
96
+ $$
97
+ \hat {\epsilon} = \epsilon_ {\theta} (x _ {t}) - \sqrt {1 - \bar {\alpha} _ {t}} \nabla_ {x} \log p _ {\phi} (c | x _ {t}), \quad x _ {t - 1} = \sqrt {\bar {\alpha} _ {t - 1}} \left(\frac {x _ {t} - \sqrt {1 - \bar {\alpha} _ {t}} \hat {\epsilon}}{\sqrt {\bar {\alpha} _ {t}}}\right) + \sqrt {1 - \bar {\alpha} _ {t - 1}} \hat {\epsilon}, (7)
98
+ $$
99
+
100
+ where $p_{\phi}(c|x_t)$ is a classifier model trained to output the probability of $x_{t}$ belonging to class $c$ . As discussed in the previous section, we can rewrite this formulation as a "guided ODE":
101
+
102
+ $$
103
+ \frac {d \bar {x}}{d \sigma} = \bar {\epsilon} _ {\sigma} (\bar {x}) - \nabla f _ {\sigma} (\bar {x}), \tag {8}
104
+ $$
105
+
106
+ where $f_{\sigma}(\bar{x}) = \frac{\sigma}{\sqrt{\sigma^2 + 1}}\log p_{\phi}(c|x_t)$ . We refer to $f_{\sigma}$ as the conditional function, which can be substituted with other functions for different tasks. After obtaining the ODE form, any numerical solver mentioned earlier can be readily applied to accelerate the sampling process. However, we observe that classical high-order numerical methods (e.g., PLMS4, RK4) fail to accelerate this task (see Figure 1) and even perform worse than the baseline DDIM.
107
+
108
+ We hypothesize that the two terms in the guided ODE may have different numerical behaviors with the conditional term being less suitable to classical high-order methods. We speculate that the difference could be partly attributed to how they are computed: $\nabla f_{\sigma}(\bar{x})$ is computed through backpropagation, whereas $\bar{\epsilon}_{\sigma}(\bar{x})$ is computed directly by evaluating a network. One possible solution to handle terms with different behaviors is the so-called operator splitting method, which divides the problem into two subproblems:
109
+
110
+ $$
111
+ \frac {d y}{d \sigma} = \bar {\epsilon} _ {\sigma} (y), \quad \frac {d z}{d \sigma} = - \nabla f _ {\sigma} (z). \tag {9}
112
+ $$
113
+
114
+ We call these the diffusion and condition subproblems, respectively. This method allows separating the hard-to-approximate $\nabla f_{\sigma}(z)$ from $\bar{\epsilon}_{\sigma}(y)$ and solving them separately in each time step. Importantly, this helps reintroduce the effective use of high-order methods on the diffusion subproblem as well as provides us with options to combine different specialized methods to maximize performance. We explore two most famous first- and second-order splitting techniques for our task:
115
+
116
+ # 3.1 LIE-TROTTER SPLITTING (LTSP)
117
+
118
+ Our first example is the simple first-order Lie-Trotter splitting method (Trotter, 1959), which expresses the splitting as
119
+
120
+ $$
121
+ \frac {d y}{d \sigma} = \bar {\epsilon} _ {\sigma} (y), \quad y \left(\sigma_ {n}\right) = \bar {x} _ {n}, \quad \sigma \in \left[ \sigma_ {n + 1}, \sigma_ {n} \right] \tag {10}
122
+ $$
123
+
124
+ $$
125
+ \frac {d z}{d \sigma} = - \nabla f _ {\sigma} (z), \quad z \left(\sigma_ {n}\right) = y \left(\sigma_ {n + 1}\right), \quad \sigma \in [ \sigma_ {n + 1}, \sigma_ {n} ] \tag {11}
126
+ $$
127
+
128
+ with the solution of this step being $\bar{x}_{n + 1} = z(\sigma_{n + 1})$ . Note that $\sigma_{n}$ is a decreasing sequence. Here Equation 10 is the same as Equation 3, which can be solved using any high-order numerical method,
129
+
130
+ Algorithm 1: Lie-Trotter Splitting (LTSP)
131
+
132
+ sample $\overline{x}_0\sim \mathcal{N}(0,\sigma_{\max}^2\mathbf{I})$
133
+
134
+ for $n\in \{0,\dots ,N - 1\}$ do $y_{n + 1} = \mathrm{PLMS}(\bar{x}_n,\sigma_n,\sigma_{n + 1},\bar{\epsilon}_{\sigma});$ $\bar{x}_{n + 1} = y_{n + 1} - (\sigma_{n + 1} - \sigma_n)\nabla f(y_{n + 1})$
135
+
136
+ end
137
+
138
+ Result: $\bar{x}_N$
139
+
140
+ Algorithm 2: Strang Splitting (STSP)
141
+
142
+ sample $\bar{x}_0\sim \mathcal{N}(0,\sigma_{\mathrm{max}}^2\mathbf{I})$
143
+
144
+ for $n\in \{0,\dots,N - 1\}$ do
145
+
146
+ $$
147
+ z _ {n + 1} = \bar {x} _ {n} - \frac {\left(\sigma_ {n + 1} - \sigma_ {n}\right)}{2} \nabla f (\bar {x} _ {n});
148
+ $$
149
+
150
+ $$
151
+ y _ {n + 1} = \operatorname {P L M S} \left(z _ {n + 1}, \sigma_ {n}, \sigma_ {n + 1}, \bar {\epsilon} _ {\sigma}\right);
152
+ $$
153
+
154
+ $$
155
+ \bar {x} _ {n + 1} = y _ {n + 1} - \frac {\left(\sigma_ {n + 1} - \sigma_ {n}\right)}{2} \nabla f \left(y _ {n + 1}\right);
156
+ $$
157
+
158
+ end
159
+
160
+ Result: $\bar{x}_N$
161
+
162
+ e.g., PLMS. For Equation 11, we can use a forward Euler step:
163
+
164
+ $$
165
+ z _ {n + 1} = z _ {n} - \Delta \sigma \nabla f _ {\sigma} \left(z _ {n}\right). \tag {12}
166
+ $$
167
+
168
+ This is equivalent to a single iteration of standard gradient descent with a learning rate $\Delta \sigma$ . This splitting scheme is summarized by Algorithm 1. We investigate different numerical methods for each subproblem in Section 4.1.
169
+
170
+ # 3.2 STRANG SPLITTING (STSP)
171
+
172
+ Strang splitting (or Strang-Marchuk) (Strang, 1968) is one of the most famous and widely used operator splitting methods. This second-order splitting works as follows:
173
+
174
+ $$
175
+ \frac {d z}{d \sigma} = - \nabla f _ {\sigma} (z), \quad z (\sigma_ {n}) = \bar {x} _ {n}, \quad \sigma \in \left[ \frac {1}{2} (\sigma_ {n} + \sigma_ {n + 1}), \sigma_ {n} \right] \tag {13}
176
+ $$
177
+
178
+ $$
179
+ \frac {d y}{d \sigma} = \bar {\epsilon} _ {\sigma} (y), \quad y (\sigma_ {n}) = z \left(\frac {1}{2} \left(\sigma_ {n} + \sigma_ {n + 1}\right)\right), \quad \sigma \in [ \sigma_ {n + 1}, \sigma_ {n} ] \tag {14}
180
+ $$
181
+
182
+ $$
183
+ \frac {d \tilde {z}}{d \sigma} = - \nabla f _ {\sigma} (\tilde {z}), \quad \tilde {z} \left(\frac {1}{2} \left(\sigma_ {n} + \sigma_ {n + 1}\right)\right) = y \left(\sigma_ {n + 1}\right), \quad \sigma \in \left[ \sigma_ {n + 1}, \frac {1}{2} \left(\sigma_ {n} + \sigma_ {n + 1}\right) \right] \tag {15}
184
+ $$
185
+
186
+ Instead of solving each subproblem for a full step length, we solve the condition subproblem for half a step before and after solving the diffusion subproblem for a full step. In theory, we can swap the order of operations without affecting convergence, but it is practically cheaper to compute the condition term twice rather than the diffusion term twice because $f_{\sigma}$ is typically a smaller network compared to $\bar{\epsilon}_{\sigma}$ . The Strang splitting algorithm is shown in Algorithm 2. This method can be shown to have better accuracy than the Lie-Trotter method, as proven in Appendix N, although it requires evaluating the condition term twice per step in exchange for improved image quality. We assess this trade-off in the experiment section.
187
+
188
+ # 4 EXPERIMENTS
189
+
190
+ Extending on our observation that classical high-order methods failed on guided sampling, we conducted a series of experiments to investigate this problem and evaluate our solution. Section 4.1 uses a simple splitting method (first-order LTSP) to study the effects that high-order methods have on each subproblem, leading to our key finding that only the conditional subproblem is less suited to classical high-order methods. This section also determines the best combination of numerical methods for the two subproblems under LTSP splitting. Section 4.2 explores improvements from using a higher-order splitting method and compares our best scheme to previous work. Finally, Section 4.3 applies our approach to a variety of conditional generation tasks with minimal changes.
191
+
192
+ For our comparison, we use pre-trained state-of-the-art diffusion models and classifiers from Dhariwal & Nichol (2021), which were trained on the ImageNet dataset (Russakovsky et al., 2015) with 1,000 total sampling steps. We treat full-path samples from a classifier-guided DDIM at 1,000 steps as reference solutions. Then, the performance of each configuration is measured by the image similarity between its generated samples using fewer steps and the reference DDIM samples, both starting from the same initial noise map. Given the same sampling time, we expect configurations with better performance to better match the full DDIM. We measure image similarity using Learned Perceptual Image Patch Similarity (LPIPS) (Zhang et al., 2018) (lower is better) and measure sampling time on a single NVIDIA RTX 3090 and a 24-core AMD Threadripper 3960x.
193
+
194
+ ![](images/7ef9c4a4a72b5becac66d0781bcd74dc132e8bb8a267350a52cffaee2b7981e7.jpg)
195
+ (a) Varying the method for the diffusion subproblem
196
+
197
+ ![](images/b669874ae38e145328d0f3e885a205d8d92ac5ec70b1dee7aa94fc9c34333c32.jpg)
198
+ (b) Varying the method for the condition subproblem
199
+ Figure 2: Comparison of different combinations of numerical methods under LTSP splitting for guided diffusion sampling. We plot LPIPS against the sampling time. [A, B] denotes the use of method A in the diffusion subproblem and method B in the condition subproblem. The red dotted lines indicate a reference DDIM score obtained from 250 sampling steps, which produce images visually close to those from 1,000 steps.
200
+
201
+ # 4.1 FINDING A SUITABLE NUMERICAL METHOD FOR EACH SUBPROBLEM
202
+
203
+ To study the effects of different numerical methods on each subproblem of the guided ODE (Equation 8), we use the simplest Lie-Trotter splitting, which itself requires no additional network evaluations. This controlled experiment has two setups: a) we fix the numerical method for the condition subproblem (Equation 11) to first-order PLMS1 (Euler's method) and vary the numerical method for the diffusion subproblem (Equation 10), and conversely b) we fix the method for the diffusion subproblem and vary the method for the condition subproblem. The numerical method options are Euler's method (PLMS1), Heun's method (RK2), $4^{\text{th}}$ order Runge-Kutta's method (RK4), and $2^{\text{nd}}/4^{\text{th}}$ order pseudo linear multi-step (PLMS2/PLMS4). We report LPIPS vs. sampling time of various numerical combinations on a diffusion model trained on ImageNet $256 \times 256$ in Figure 2. The red dotted lines indicate a reference DDIM score obtained from 250 sampling steps, a common choice that produces good samples that are perceptually close to those from a full 1,000-step DDIM (Dhariwal & Nichol, 2021; Nichol & Dhariwal, 2021).
204
+
205
+ Given a long sampling time, non-split PLMS4 performs better than the DDIM baseline. However, when the sampling time is reduced, the image quality of PLMS4 rapidly decreases and becomes much worse than that of DDIM, especially under 15 seconds in Figure 2. When we split the ODE and solve both subproblems using first-order PLMS1 (Euler), the performance is close to that of DDIM, which is also considered first-order but without any splitting. This helps verify that merely splitting the ODE does not significantly alter the sampling speed.
206
+
207
+ In the setup a), when RK2 and RK4 are used for the diffusion subproblem, they also perform worse than the DDIM baseline. This slowdown is caused by the additional evaluations of the network by these methods, which outweigh the improvement gained in each longer diffusion step. Note that if we instead measure the image quality with respect to the number of diffusion steps, RK2 and RK4 can outperform other methods (Appendix E); however, this is not our metric of interest. On the other hand, PLMS2 and PLMS4, which require no additional network evaluations, are about $8 - 10\%$ faster than DDIM and can achieve the same LPIPS score as the DDIM that uses 250 sampling steps in 20-26 fewer steps. Importantly, when the sampling time is reduced, their performance does not degrade rapidly like the non-split PLMS4 and remains at the same level as DDIM.
208
+
209
+ In the setup b) where we vary the numerical method for the condition subproblem, the result reveals an interesting contrast—none of the methods beats DDIM and some even make the sampling diverge [PLMS1, RK4]. These findings suggest that the gradients of conditional functions are less "compatible" with classical high-order methods, especially when used with a small number of steps. This phenomenon may be related to the "stiffness" condition of ODEs, which we discuss further in Section 5. For the remainder of our experiments, we will use the combination [PLMS4, PLMS1] for the diffusion and condition subproblems, respectively.
210
+
211
+ ![](images/080d5118ad6f234af0c26fd0e8f4d3fb2dd8c1e02cb7b30b92bcdb67b9b7b871.jpg)
212
+ Figure 3: Comparison of different numerical methods for guided diffusion sampling.
213
+
214
+ <table><tr><td></td><td colspan="4">Sampling time within</td></tr><tr><td></td><td>5 sec.</td><td>10 sec.</td><td>15 sec.</td><td>20 sec.</td></tr><tr><td>DDIM</td><td>0.116</td><td>0.062</td><td>0.043</td><td>0.033</td></tr><tr><td>PLMS4</td><td>0.278</td><td>0.141</td><td>0.057</td><td>0.026</td></tr><tr><td>RK2</td><td>0.193</td><td>0.059</td><td>0.036</td><td>0.028</td></tr><tr><td>RK4</td><td>0.216</td><td>0.054</td><td>0.039</td><td>0.028</td></tr><tr><td>LTSP4</td><td>0.121</td><td>0.058</td><td>0.037</td><td>0.028</td></tr><tr><td>STSP4</td><td>0.079</td><td>0.035</td><td>0.022</td><td>0.013</td></tr></table>
215
+
216
+ Table 1: Average LPIPS when the sampling time is limited to be under 5 - 20 seconds.
217
+
218
+ # 4.2 IMPROVED SPLITTING METHOD
219
+
220
+ This experiment investigates improvements from using a high-order splitting method, specifically the Strang splitting method, with the numerical combination [PLMS4, PLMS1] and compares our methods to previous work. Note that besides DDIM Dhariwal & Nichol (2021), no previous work is specifically designed for accelerating guided sampling, thus the baselines in this comparison are only adaptations of the core numerical methods used in those papers. And to our knowledge, no prior guided-diffusion work uses splitting numerical methods. Non-split numerical method baselines are PLMS4, which is used in Liu et al. (2022), RK2, which is used in Karras et al. (2022); Lu et al. (2022), and higher-order RK4. We report the LPIPS scores of these methods with respect to the sampling time in Figure 3 and Table 1.
221
+
222
+ Without any splitting, PLMS4, RK2 and RK4 show significantly poorer image quality when used with short sampling times $< 10$ seconds. The best performer is our Strang splitting (STSP4), which can reach the same quality as 250-step DDIM while using $32 - 58\%$ less sampling time. STSP4 also obtains the best (lowest) LPIPS scores for sample times of 5, 10, 15, and 20 seconds. More statistical details and comparison with other split combinations are in Appendix F, G.
223
+
224
+ In addition, we perform a quantitative evaluation for class-conditional generation by sampling 50,000 images based on uniformly chosen class conditions with a small number of sampling steps and evaluating the Fréchet Inception Distance (FID) Heusel et al. (2017) (lower is better) and the improved precision/recall Kynkänniemi et al. (2019) (higher is better) against the ImageNet test set at 128, 256, and 512 resolutions. Following (Dhariwal & Nichol, 2021), we use a 25-step DDIM as a baseline, which already produces visually reasonable results. As PLMS and LTSP require the same number of network evaluations as the DDIM, they are used also with 25 steps. For STSP with a slower evaluation time, it is only allowed 20 steps, which is the highest number of steps such that its sampling time is within that of the baseline 25-step DDIM. Here LTSP2 and STSP2 are Lie-Trotter and Strang splitting methods with the combination [PLMS2, PLMS1]. In Table 2, we report the results for three different ImageNet resolutions and the average sampling time per image in seconds.
225
+
226
+ Our STSP4 performs best on all measurements except Recall on ImageNet512. On ImageNet512, PLMS4 has the highest Recall score but a poor FID of 16, indicating that the generated images have good distribution coverage but may poorly represent the real distribution. On ImageNet256, STSP4 can yield 4.49 FID in 20 steps, compared to 4.59 FID in 250 steps originally reported in the paper (Dhariwal & Nichol, 2021). Our STSP4 is about $9.4 \times$ faster when tested on the same machine.
227
+
228
+ # 4.3 SPLITTING METHODS IN OTHER TASKS
229
+
230
+ Besides class-conditional generation, our approach can also accelerate any conditional image generation as long as the gradient of the conditional function can be defined. We test our approach on four tasks: text-to-image generation, image inpainting, colorization, and super-resolution.
231
+
232
+ Text-to-image generation: We use a pre-trained text-to-image Disco-Diffusion (Letts et al., 2021) based on Crowson (2021), which substitutes the classifier output with the dot product of the image and caption encodings from CLIP (Radford et al., 2021). For more related experiments on StableDiffusion (Rombach et al., 2022), please refer to Appendix L, M.
233
+
234
+ <table><tr><td>Method</td><td>Steps</td><td>Time</td><td>FID</td><td>Prec</td><td>Rec</td></tr><tr><td colspan="6">ImageNet128</td></tr><tr><td>DDIM</td><td>25</td><td>0.55</td><td>6.69</td><td>0.78</td><td>0.49</td></tr><tr><td>PLMS2</td><td>25</td><td>0.57</td><td>5.71</td><td>0.80</td><td>0.51</td></tr><tr><td>PLMS4</td><td>25</td><td>0.57</td><td>4.97</td><td>0.80</td><td>0.53</td></tr><tr><td>LTSP2</td><td>25</td><td>0.55</td><td>5.14</td><td>0.81</td><td>0.51</td></tr><tr><td>LTSP4</td><td>25</td><td>0.55</td><td>3.85</td><td>0.81</td><td>0.54</td></tr><tr><td>STSP2</td><td>20</td><td>0.54</td><td>5.33</td><td>0.80</td><td>0.52</td></tr><tr><td>STSP4</td><td>20</td><td>0.54</td><td>3.78</td><td>0.81</td><td>0.54</td></tr><tr><td>ADM-G</td><td>250</td><td>5.59*</td><td>2.97</td><td>0.78</td><td>0.59</td></tr></table>
235
+
236
+ <table><tr><td>Method</td><td>Steps</td><td>Time</td><td>FID</td><td>Prec</td><td>Rec</td></tr><tr><td colspan="6">ImageNet256</td></tr><tr><td>DDIM</td><td>25</td><td>1.99</td><td>5.47</td><td>0.80</td><td>0.47</td></tr><tr><td>PLMS4</td><td>25</td><td>2.05</td><td>4.71</td><td>0.82</td><td>0.49</td></tr><tr><td>STSP4</td><td>20</td><td>1.95</td><td>4.49</td><td>0.83</td><td>0.50</td></tr><tr><td>ADM-G</td><td>250</td><td>20.9*</td><td>4.59</td><td>0.82</td><td>0.50</td></tr><tr><td colspan="6">ImageNet512</td></tr><tr><td>DDIM</td><td>25</td><td>5.56</td><td>9.07</td><td>0.81</td><td>0.42</td></tr><tr><td>PLMS4</td><td>25</td><td>5.78</td><td>16.00</td><td>0.75</td><td>0.51</td></tr><tr><td>STSP4</td><td>20</td><td>5.13</td><td>8.24</td><td>0.83</td><td>0.45</td></tr><tr><td>ADM-G</td><td>250</td><td>56.2*</td><td>7.72</td><td>0.87</td><td>0.42</td></tr></table>
237
+
238
+ Table 2: Comparison of different numerical methods using a few steps on guided diffusion sampling. Our methods and the best scores are highlighted in bold. We provide the reported scores from Dhariwal & Nichol (2021) using 250 sampling steps, referred to as ADM-G in their paper. *ADM-G's sampling times are measured using our machine.
239
+
240
+ "A beautiful painting of a singular lighthouse, shining its light across a tumultuous sea of blood, trending on artstation."
241
+
242
+ ![](images/04bd8acc5e641c62aa7b3d4c5cf1ab2986da12c4f40d57105c57566942214784.jpg)
243
+
244
+ ![](images/25e42ce1b44316ca8f1f10a2a64ce6e9e68efbe28717fb44b2c5af7786c98ef7.jpg)
245
+
246
+ ![](images/21383a2d7d4a3756e6bb06e0be1b74c55cb7d0f86b30940e262acd16df621072.jpg)
247
+
248
+ ![](images/24ec5c85f33615277529fc239592fc233914b8d8afe1544a12b722ca22941f1c.jpg)
249
+
250
+ ![](images/016ad5f15d0e5b2ef3afcb4512965035d9cce41454412dd69a29372938fd4b4d.jpg)
251
+
252
+ "A beautiful painting of a starry night, over a sunflower sea, trending on artstation."
253
+
254
+ ![](images/7fcce160ff9a1b615e84f08d6fc5bb0133be5ebce412bd8acd4430cf6fc70380.jpg)
255
+ Full DDIM (1,000 steps)
256
+
257
+ ![](images/a366fc41a743e889f3aa4cc9242ede6d8e96ad135ca0fb91e66c5e718f2f1194.jpg)
258
+ DDIM (45 steps)
259
+ (approximately using the same sampling time)
260
+
261
+ ![](images/34d694a32c0cf4eff2912e3188727321ec06299061811f68b617e207c25f90b5.jpg)
262
+ PLMS4 (45 steps)
263
+ Figure 4: Text-to-image generation using different sampling methods.
264
+
265
+ ![](images/46c34d84060fc20c7ffc8ae578c84d001d91dbf82c2a054d301a15f30ab3a21a.jpg)
266
+ LTSP4 (45 steps)
267
+
268
+ ![](images/d4ebae10036db5ee5e9208ab4bf3c28f8a360974b758b9d5de156c60c1f999e2.jpg)
269
+ STSP4 (30 steps)
270
+
271
+ Image inpainting & colorization: For these two tasks, we follow the techniques proposed in Song et al. (2020b) and Chung et al. (2022a), which improves the conditional functions of both tasks with "manifold constraints." We use the same diffusion model Dhariwal & Nichol (2021) trained on ImageNet as our earlier Experiments 4.1, 4.2.
272
+
273
+ Super-resolution: We follow the formulation from ILVR (Choi et al., 2021) combined with the manifold constraints Chung et al. (2022a), and also use our earlier ImageNet diffusion model.
274
+
275
+ Figure 4 compares our techniques, LTSP4 and STSP4, with the DDIM baseline and PLMS4 on text-to-image generation. Each result is produced using a fixed sampling time of about 26 seconds. STSP4, which uses 30 diffusion steps compared to 45 in the other methods, produces more realistic results with color contrast that is more similar to the full DDIM references'. Figure 5 shows that our STSP4 produces more convincing results than the DDIM baseline with fewer artifacts on the other three tasks while using the same 5 second sampling time. Implementation details, quantitative evaluations, and more results are in Appendix J, K.
276
+
277
+ # 5 DISCUSSION
278
+
279
+ Our findings show that when the sampling ODE consists of multiple terms from different networks, their numerical behaviors can be different and treating them separately can be more optimal. Another promising direction is to improve the behavior of the gradient of the conditional function / classifier itself and study whether related properties such as adversarial robustness or gradient smoothness can induce the desirable temporal smoothness in the sampling ODE. However, it is not yet clear what specific characteristics of the behavior play an important role. This challenge may be related to a
280
+
281
+ ![](images/37e3242db87c98c83edae52f8f95fe94dd1bd87ead4da7643d622e48f8ade602.jpg)
282
+ Figure 5: Guided-diffusion results of our STSP4 and DDIM on inpainting, colorization, and superresolution. Both methods were limited to use approximately the same sampling time.
283
+
284
+ condition called "stiffness" in solving ODEs Ernst & Gerhard (2010), which lacks a clear definition but describes the situation where explicit numerical methods, such as RK or PLMS, require a very small step size even in regions with smooth curvature.
285
+
286
+ As an alternative to the classifier-guided model, Ho & Salimans (2021) propose a classifier-free model that can perform conditional generation without a classifier while remaining a generative model. This model can utilize high-order methods as no classifier is involved, but it requires evaluating the classifier-free network twice per step, which is typically more expensive than evaluating a normal diffusion model and a classifier. It is important to note that our accelerating technique and classifier-free models are not mutually exclusive, and one can still apply a conditional function and our splitting technique to guide a classifier-free model in a direction it has not been trained for.
287
+
288
+ While our paper only focuses on ODEs derived from the deterministic sampling of DDIM, one can convert SDE-based diffusion models to ODEs (Karras et al., 2022) and still use our technique. More broadly, we can accelerate any diffusion model that can be expressed as a differential equation with a summation of two terms. When these terms behave differently, the benefit from splitting can be substantial. Nevertheless, our findings are based on common, existing models and $\sigma$ schedule from Dhariwal & Nichol (2021). Further investigation into the impact of the $\sigma$ schedule or different types and architectures of diffusion models is still required.
289
+
290
+ # 6 CONCLUSION
291
+
292
+ In this paper, we investigate the failure to accelerate guided diffusion sampling of classical high-order numerical methods and propose a solution based on splitting numerical methods. We found that the gradients of conditional functions are less suitable to classical high-order numerical methods and design a technique based on Strang splitting and a combination of fourth- and first-order numerical methods. Our method achieves better LPIPS and FID scores than previous work given the same sampling time and is $32 - 58\%$ faster than a 250-step DDIM baseline. Our technique can successfully accelerate a variety of tasks, such as text-to-image generation, inpainting, colorization, and super-resolution.
293
+
294
+ # REFERENCES
295
+
296
+ Jooyoung Choi, Sungwon Kim, Yonghyun Jeong, Youngjune Gwon, and Sungroh Yoon. ILVR: Conditioning method for denoising diffusion probabilistic models. In 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 14347-14356. IEEE, 2021.
297
+ Hyungjin Chung, Byeongsu Sim, Dohoon Ryu, and Jong Chul Ye. Improving diffusion models for inverse problems using manifold constraints. In Advances in Neural Information Processing Systems, 2022a.
298
+ Hyungjin Chung, Byeongsu Sim, and Jong Chul Ye. Come-Closer-Diffuse-Faster: Accelerating conditional diffusion models for inverse problems through stochastic contraction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 12413-12422, 2022b.
299
+ Katherine Crowson. CLIP guided diffusion 512x512, secondary model method. https:// twitter.com/RiversHaveWings/status/1462859669454536711, 2021.
300
+ Prafulla Dhariwal and Alexander Nichol. Diffusion models beat GANs on image synthesis. Advances in Neural Information Processing Systems, 34:8780-8794, 2021.
301
+ Tim Dockhorn, Arash Vahdat, and Karsten Kreis. Score-based generative modeling with critically-damped Langevin diffusion. In International Conference on Learning Representations (ICLR), 2022.
302
+ Hairer Ernst and Wanner Gerhard. Solving Ordinary Differential Equations. 2: Stiff and Differential Algebraic Problems, volume 2. Springer, 2010.
303
+ Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. GANs trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.
304
+ Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. In NeurIPS 2021 Workshop on Deep Generative Models and Downstream Applications, 2021.
305
+ Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. In Proceedings of the 34th International Conference on Neural Information Processing Systems, pp. 6840-6851, 2020.
306
+ Tero Karras, Miika Aittala, Timo Aila, and Samuli Laine. Elucidating the design space of diffusion-based generative models. In NeurIPS 2022 Workshop on Deep Generative Models and Downstream Applications, 2022.
307
+ Bahjat Kawar, Roy Ganz, and Michael Elad. Enhancing diffusion-based image synthesis with robust classifier guidance. arXiv preprint arXiv:2208.08664, 2022.
308
+ Tuomas Kynkänniemi, Tero Karras, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Improved precision and recall metric for assessing generative models. Advances in Neural Information Processing Systems, 32, 2019.
309
+ Adam Letts, Chris Scalf, Alex Spirin, Tom Mason, Chris Allen, Max Ingham, Mike Howles, Nate Baer, and David Sin. Disco diffusion. https://github.com/alembics/disco-diffusion, 2021.
310
+ Wei Li, Xue Xu, Xinyan Xiao, Jiachen Liu, Hu Yang, Guohao Li, Zhanpeng Wang, Zhifan Feng, Qiaoqiao She, Yajuan Lyu, et al. Upainting: Unified text-to-image diffusion generation with cross-modal guidance. arXiv preprint arXiv:2210.16031, 2022.
311
+ Luping Liu, Yi Ren, Zhijie Lin, and Zhou Zhao. Pseudo numerical methods for diffusion models on manifolds. In International Conference on Learning Representations, 2022.
312
+ Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. DPM-Solver: A fast ode solver for diffusion probabilistic model sampling in around 10 steps. In Advances in Neural Information Processing Systems, 2022.
313
+
314
+ Eric Luhman and Troy Luhman. Knowledge distillation in iterative generative models for improved sampling speed. arXiv preprint arXiv:2101.02388, 2021.
315
+ Chenlin Meng, Yang Song, Jiaming Song, Jiajun Wu, Jun-Yan Zhu, and Stefano Ermon. SDEdit: Guided image synthesis and editing with stochastic differential equations. In International Conference on Learning Representations, 2021.
316
+ Alexander Quinn Nichol and Prafulla Dhariwal. Improved denoising diffusion probabilistic models. In International Conference on Machine Learning, pp. 8162-8171. PMLR, 2021.
317
+ Alexander Quinn Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. GLIDE: Towards photorealistic image generation and editing with text-guided diffusion models. In International Conference on Machine Learning, pp. 16784-16804. PMLR, 2022.
318
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning, pp. 8748-8763. PMLR, 2021.
319
+ Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10684-10695, 2022.
320
+ Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. DreamBooth: Fine tuning text-to-image diffusion models for subject-driven generation. arXiv preprint arxiv:2208.12242, 2022.
321
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015.
322
+ Tim Salimans and Jonathan Ho. Progressive distillation for fast sampling of diffusion models. In International Conference on Learning Representations, 2022.
323
+ Hiroshi Sasaki, Chris G Willcocks, and Toby P Breckon. Unit-DDPM: Unpaired image translation with denoising diffusion probabilistic models. arXiv preprint arXiv:2104.05358, 2021.
324
+ Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In International Conference on Learning Representations, 2020a.
325
+ Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. In International Conference on Learning Representations, 2020b.
326
+ Gilbert Strang. On the construction and comparison of difference schemes. SIAM journal on numerical analysis, 5(3):506-517, 1968.
327
+ Hale F Trotter. On the product of semi-groups of operators. Proceedings of the American Mathematical Society, 10(4):545-551, 1959.
328
+ Jinyi Wang, Zhaoyang Lyu, Dahua Lin, Bo Dai, and Hongfei Fu. Guided diffusion model for adversarial purification. arXiv preprint arXiv:2205.14969, 2022.
329
+ Daniel Watson, Jonathan Ho, Mohammad Norouzi, and William Chan. Learning to efficiently sample from diffusion probabilistic models. arXiv preprint arXiv:2106.03802, 2021.
330
+ Quanlin Wu, Hang Ye, and Yuntian Gu. Guided diffusion model for adversarial purification from random noise. arXiv preprint arXiv:2206.10875, 2022.
331
+ Qinsheng Zhang and Yongxin Chen. Fast sampling of diffusion models with exponential integrator. In NeurIPS 2022 Workshop on Score-Based Methods, 2022.
332
+ Qinsheng Zhang, Molei Tao, and Yongxin Chen. gDDIM: Generalized denoising diffusion implicit models. arXiv preprint arXiv:2206.05564, 2022.
333
+
334
+ Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 586-595, 2018.
335
+ Min Zhao, Fan Bao, Chongxuan Li, and Jun Zhu. EGSDE: Unpaired image-to-image translation via energy-guided stochastic differential equations. In Advances in Neural Information Processing Systems, 2022.
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3b9958ce56a21b95c89294a3be59c216ce2c73f475f79cee0dff0373ebf2096
3
+ size 713328
acceleratingguideddiffusionsamplingwithsplittingnumericalmethods/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5a9430d09fec624f4940c510091942c9d38f9f669f6c8c5b403d0d6d4b11786
3
+ size 372173
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd9f19bc1307f17f419f4698027210d03372aec1c406e9b974657c3a857d5279
3
+ size 121783
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04752be14c80f805f322416263a108d2b8c8724ba4d03a2b251b496d8e45f2bb
3
+ size 144634
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/74469fb9-cfd5-4e7f-8c5b-74a1af72385b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4cd63890ab26e57d574ab1611244971bf3bbcff8620e235a7853a256724729f
3
+ size 779640
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/full.md ADDED
@@ -0,0 +1,594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACCELERATING HAMILTONIAN MONTE CARLO VIA CHEBYSHEV INTEGRATION TIME
2
+
3
+ Jun-Kun Wang and Andre Wibisono
4
+
5
+ Department of Computer Science, Yale University
6
+
7
+ {jun-kun.wang,andre.wibisono}@yale.edu
8
+
9
+ # ABSTRACT
10
+
11
+ Hamiltonian Monte Carlo (HMC) is a popular method in sampling. While there are quite a few works studying various aspects of this method, an interesting question is how to choose its integration time to achieve acceleration. In this work, we consider accelerating the process of sampling from a distribution $\pi(x) \propto \exp(-f(x))$ via HMC with time-varying integration time. When the potential $f$ is $L$ -smooth and $m$ -strongly convex, i.e. for sampling from a log-smooth and strongly log-concave target distribution $\pi$ , it is known that under a constant integration time, the number of iterations that ideal HMC takes to get an $\epsilon$ Wasserstein-2 distance to the target $\pi$ is $O(\kappa \log \frac{1}{\epsilon})$ , where $\kappa \coloneqq \frac{L}{m}$ is the condition number. We propose a scheme of time-varying integration time based on the roots of Chebyshev polynomials. We show that in the case of quadratic potential $f$ , i.e. when the target $\pi$ is a Gaussian distribution, ideal HMC with this choice of integration time only takes $O(\sqrt{\kappa} \log \frac{1}{\epsilon})$ number of iterations to reach Wasserstein-2 distance less than $\epsilon$ ; this improvement on the dependence on condition number is akin to acceleration in optimization. The design and analysis of HMC with the proposed integration time is built on the tools of Chebyshev polynomials. Experiments find the advantage of adopting our scheme of time-varying integration time even for sampling from distributions with smooth strongly convex potentials that are not quadratic.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ Markov chain Monte Carlo (MCMC) algorithms are fundamental techniques for sampling from probability distributions, which is a task that naturally arises in statistics (Duane et al., 1987; Girolami & Calderhead, 2011), optimization (Flaxman et al., 2005; Duchi et al., 2012; Jin et al., 2017), machine learning and others (Wenzel et al., 2020; Salakhutdinov & Mnih, 2008; Koller & Friedman, 2009; Welling & Teh, 2011). Among all the MCMC algorithms, the most popular ones perhaps are Langevin methods (Li et al., 2022; Dalalyan, 2017; Durmus et al., 2019; Vempala & Wibisono, 2019; Lee et al., 2021b; Chewi et al., 2020) and Hamiltonian Monte Carlo (HMC) (Neal, 2012; Betancourt, 2017; Hoffman & Gelman, 2014; Levy et al., 2018). For the former, recently there have been a sequence of works leveraging some techniques in optimization to design Langevin methods, which include borrowing the idea of momentum methods like Nesterov acceleration (Nesterov, 2013) to design fast methods, e.g., (Ma et al., 2021; Dalalyan & Riou-Durand, 2020). Specifically, Ma et al. (2021) show that for sampling from distributions satisfying the log-Sobolev inequality, under-damped
16
+
17
+ Langevin improves the iteration complexity of over-damped Langevin from $O\left(\frac{d}{\epsilon}\right)$ to $O\left(\sqrt{\frac{d}{\epsilon}}\right)$ , where $d$ is the dimension and $\epsilon$ is the error in KL divergence, though whether their result has an optimal dependency on the condition number is not clear. On the other hand, compared to Langevin methods, the connection between HMCs and techniques in optimization seems rather loose. Moreover, to our knowledge, little is known about how to accelerate HMCs with a provable acceleration guarantee for converging to a target distribution. Specifically, Chen & Vempala (2019) show that for sampling from strongly log-concave distributions, the iteration complexity of ideal HMC is $O\left(\kappa \log \frac{1}{\epsilon}\right)$ , and Vishnoi (2021) shows the same rate of ideal HMC when the potential is strongly convex quadratic in a nice tutorial. In contrast, there are a few methods that exhibit acceleration when minimizing strongly convex quadratic functions in optimization. For example, while Heavy Ball (Polyak, 1964) does not have an accelerated linear rate globally for minimizing general smooth strongly convex functions, it does show acceleration when minimizing strongly convex quadratic functions (Wang et al., 2020;
18
+
19
+ # Algorithm 1: IDEAL HMC
20
+
21
+ 1: Require: an initial point $x_0 \in \mathbb{R}^d$ , number of iterations $K$ , and a scheme of integration time $\{\eta_k^{(K)}\}$ .
22
+ 2: for $k = 1$ to $K$ do
23
+ 3: Sample velocity $\xi \sim N(0, I_d)$ .
24
+ 4: Set $(x_{k},v_{k}) = \mathrm{HMC}_{\eta_{k}^{(K)}}(x_{k - 1},\xi)$
25
+ 5: end for
26
+
27
+ 2021; 2022). This observation makes us wonder whether one can get an accelerated linear rate of ideal HMC for sampling, i.e., $O\left(\sqrt{\kappa}\log \frac{1}{\epsilon}\right)$ , akin to acceleration in optimization.
28
+
29
+ We answer this question affirmatively, at least in the Gaussian case. We propose a time-varying integration time for HMC, and we show that ideal HMC with this time-varying integration time exhibits acceleration when the potential is a strongly convex quadratic (i.e. the target $\pi$ is a Gaussian), compared to what is established in Chen & Vempala (2019) and Vishnoi (2021) for using a constant integration time. Our proposed time-varying integration time at each iteration of HMC depends on the total number of iterations $K$ , the current iteration index $k$ , the strong convexity constant $m$ , and the smoothness constant $L$ of the potential; therefore, the integration time at each iteration is simple to compute and is set before executing HMC. Our proposed integration time is based on the roots of Chebyshev polynomials, which we will describe in details in the next section. In optimization, Chebyshev polynomials have been used to help design accelerated algorithms for minimizing strongly convex quadratic functions, i.e., Chebyshev iteration (see e.g., Section 2.3 in d'Aspremont et al. (2021)). Our result of accelerating HMC via using the proposed Chebyshev integration time can be viewed as the sampling counterpart of acceleration from optimization. Interestingly, for minimizing strongly convex quadratic functions, acceleration of vanilla gradient descent can be achieved via a scheme of step sizes that is based on a Chebyshev polynomial, see e.g., Agarwal et al. (2021), and our work is inspired by a nice blog article by Pedregosa (2021). Hence, our acceleration result of HMC can also be viewed as a counterpart in this sense. In addition to our theoretical findings, we conduct experiments of sampling from a Gaussian as well as sampling from distributions whose potentials are not quadratics, which include sampling from a mixture of two Gaussians, Bayesian logistic regression, and sampling from a hard distribution that was proposed in Lee et al. 
(2021a) for establishing some lower-bound results of certain Metropolized sampling methods. Experimental results show that our proposed time-varying integration time also leads to a better performance compared to using the constant integration time of Chen & Vempala (2019) and Vishnoi (2021) for sampling from the distributions whose potential functions are not quadratic. We conjecture that our proposed time-varying integration time also helps accelerate HMC for sampling from log-smooth and strongly log-concave distributions, and we leave the analysis of such cases for future work.
30
+
31
+ # 2 PRELIMINARIES
32
+
33
+ # 2.1 HAMILTONIAN MONTE CARLO (HMC)
34
+
35
+ Suppose we want to sample from a target probability distribution $\nu(x) \propto \exp(-f(x))$ on $\mathbb{R}^d$ , where $f: \mathbb{R}^d \to \mathbb{R}$ is a continuous function which we refer to as the potential.
36
+
37
+ Denote $x \in \mathbb{R}^d$ the position and $v \in \mathbb{R}^d$ the velocity of a particle. In this paper, we consider the standard Hamiltonian of the particle (Chen & Vempala, 2019; Neal, 2012), which is defined as
38
+
39
+ $$
40
+ H (x, v) := f (x) + \frac {1}{2} \| v \| ^ {2}, \tag {1}
41
+ $$
42
+
43
+ while we refer the readers to Girolami & Calderhead (2011); Hirt et al. (2021); Brofos & Lederman (2021) and the references therein for other notions of the Hamiltonian. The Hamiltonian flow generated by $H$ is the flow of the particle which evolves according to the following differential equations:
44
+
45
+ $$
46
+ \frac{dx}{dt} = \frac{\partial H}{\partial v} \quad \text{and} \quad \frac{dv}{dt} = -\frac{\partial H}{\partial x}.
47
+ $$
48
+
49
+ For the standard Hamiltonian defined in (1), the Hamiltonian flow becomes
50
+
51
+ $$
52
+ \frac{dx}{dt} = v \quad \text{and} \quad \frac{dv}{dt} = -\nabla f(x). \tag{2}
53
+ $$
54
+
55
+ We will write $(x_{t},v_{t}) = \mathrm{HMC}_{t}(x_{0},v_{0})$ as the position $x$ and the velocity $v$ of the Hamiltonian flow after integration time $t$ starting from $(x_0,v_0)$ . There are many important properties of the Hamiltonian flow including that the Hamiltonian is conserved along the flow, the vector field associated with the flow is divergence free, and the Hamiltonian dynamic is time reversible, see e.g., Section 3 in Vishnoi (2021).
56
+
57
+ The Ideal HMC algorithm (see Algorithm 1) proceeds as follows: in each iteration $k$ , sample an initial velocity from the normal distribution, and then flow following the Hamiltonian flow with a pre-specified integration time $\eta_{k}$ . It is well-known that ideal HMC preserves the target density $\pi(x) \propto \exp(-f(x))$ ; see e.g., Theorem 5.1 in Vishnoi (2021). Furthermore, in each iteration, HMC brings the density of the iterates $x_{k} \sim \rho_{k}$ closer to the target $\pi$ . However, the Hamiltonian flow $\mathrm{HMC}_t(x_0, v_0)$ is in general difficult to simulate exactly, except for some special potentials. In practice, the Verlet integrator is commonly used to approximate the flow and a Metropolis-Hastings filter is applied to correct the induced bias arises from the use of the integrator (Tripuraneni et al., 2017; Brofos & Lederman, 2021; Hoffman et al., 2021; Lee et al., 2021a; Chen et al., 2020). In recent years, there have been some progress on showing some rigorous theoretical guarantees of HMCs for converging to a target distribution, e.g., Chen et al. (2020); Durmus et al. (2017); Bou-Rabee & Eberle (2021); Mangoubi & Smith (2019; 2021); Mangoubi & Vishnoi (2018). There are also other variants of HMCs proposed in the literature, e.g., Riou-Durand & Vogrinc (2022); Bou-Rabee & Sanz-Serna (2017); Zou & Gu (2021); Steeg & Galstyan (2021); Hoffman & Gelman (2014); Tripuraneni et al. (2017); Chen et al. (2014), to name just a few.
58
+
59
+ Recall that the 2-Wasserstein distance between probability distributions $\nu_{1}$ and $\nu_{2}$ is
60
+
61
+ $$
62
+ \mathrm{W}_{2}(\nu_{1}, \nu_{2}) := \inf_{\gamma \in \Gamma(\nu_{1}, \nu_{2})} \mathbb{E}_{(x, y) \sim \gamma} \left[ \| x - y \|^{2} \right]^{1/2}
63
+ $$
64
+
65
+ where $\Gamma (\nu_{1},\nu_{2})$ represents the set of all couplings of $\nu_{1}$ and $\nu_{2}$ .
66
+
67
+ # 2.2 ANALYSIS OF HMC IN QUADRATIC CASE WITH CONSTANT INTEGRATION TIME
68
+
69
+ In the following, we replicate the analysis of ideal HMC with a constant integration time for quadratic potentials (Vishnoi, 2021), which provides the necessary ingredients for introducing our method in the next section. Specifically, we consider the following quadratic potential:
70
+
71
+ $$
72
+ f(x) := \sum_{j = 1}^{d} \lambda_{j} x_{j}^{2}, \quad \text{where } 0 < m \leq \lambda_{j} \leq L, \tag{3}
73
+ $$
74
+
75
+ which means the target density is the Gaussian distribution $\pi = \mathcal{N}(0,\Lambda^{-1})$ , where $\Lambda$ is the diagonal matrix whose $j^{\mathrm{th}}$ diagonal entry is $\lambda_{j}$ . We note for a general Gaussian target $\mathcal{N}(\mu ,\Sigma)$ for some $\mu \in \mathbb{R}^d$ and $\Sigma \succ 0$ , we can shift and rotate the coordinates to make $\mu = 0$ and $\Sigma$ a diagonal matrix, and our analysis below applies. So without loss of generality, we may assume the quadratic potential is separable, as in (3).
76
+
77
+ In this quadratic case, the Hamiltonian flow (2) becomes a linear system of differential equations, and we have an exact solution given by sinusoidal functions, which are
78
+
79
+ $$
80
+ x _ {t} [ j ] = \cos (\sqrt {2 \lambda_ {j}} t) x _ {0} [ j ] + \frac {1}{\sqrt {2 \lambda_ {j}}} \sin (\sqrt {2 \lambda_ {j}} t) v _ {0} [ j ], \tag {4}
81
+ $$
82
+
83
+ $$
84
+ v _ {t} [ j ] = - \sqrt {2 \lambda_ {j}} \sin \left(\sqrt {2 \lambda_ {j}} t\right) x _ {0} [ j ] + \cos \left(\sqrt {2 \lambda_ {j}} t\right) v _ {0} [ j ].
85
+ $$
86
+
87
+ In particular, we recall the following result on the deviation between two co-evolving particles with the same initial velocity.
88
+
89
+ Lemma 1. (Vishnoi, 2021) Let $x_0, y_0 \in \mathbb{R}^d$ . Consider the following coupling: $(x_t, v_t) = \mathrm{HMC}_t(x_0, \xi)$ and $(y_t, u_t) = \mathrm{HMC}_t(y_0, \xi)$ for some $\xi \in \mathbb{R}^d$ . Then for all $t \geq 0$ and for all $j \in [d]$ , it holds that
90
+
91
+ $$
92
+ x _ {t} [ j ] - y _ {t} [ j ] = \cos \left(\sqrt {2 \lambda_ {j}} t\right) \times \left(x _ {0} [ j ] - y _ {0} [ j ]\right).
93
+ $$
94
+
95
+ Using Lemma 1, we can derive the convergence rate of ideal HMC for the quadratic potential as follows.
96
+
97
+ Lemma 2. (Vishnoi, 2021) Let $\pi \propto \exp(-f) = \mathcal{N}(0, \Lambda^{-1})$ be the target distribution, where $f(x)$ is defined on (3). Let $\rho_{K}$ be the distribution of $x_{K}$ generated by Algorithm 1 at the final iteration $K$ . Then for any $\rho_{0}$ and any $K \geq 1$ , we have
98
+
99
+ $$
100
+ W _ {2} (\rho_ {K}, \pi) \leq \max _ {j \in [ d ]} \left| \Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right) \right| W _ {2} (\rho_ {0}, \pi).
101
+ $$
102
+
103
+ We replicate the proof of Lemma 1 and Lemma 2 in Appendix B for the reader's convenience.
104
+
105
+ Vishnoi (2021) shows that by choosing
106
+
107
+ $$
108
+ \text{(Constant integration time)} \quad \eta_{k}^{(K)} = \frac{\pi}{2} \frac{1}{\sqrt{2L}}, \tag{5}
109
+ $$
110
+
111
+ one has that $\cos \left(\sqrt{2\lambda_j}\eta_k^{(K)}\right)\leq 1 - \Theta \left(\frac{m}{L}\right)$ for all the iterations $k\in [K]$ and dimensions $j\in [d]$ . Hence, by Lemma 2, the distance satisfies
112
+
113
+ $$
114
+ W _ {2} (\rho_ {K}, \pi) = O \left(\left(1 - \Theta \left(\frac {m}{L}\right)\right) ^ {K}\right) W _ {2} (\rho_ {0}, \pi)
115
+ $$
116
+
117
+ after $K$ iterations of ideal HMC with the constant integration time. On the other hand, for general smooth strongly convex potentials $f(\cdot)$ , Chen & Vempala (2019) show the same convergence rate $1 - \Theta\left(\frac{m}{L}\right)$ of HMC using a constant integration time $\eta_k^{(K)} = \frac{c}{\sqrt{L}}$ , where $c > 0$ is a universal constant. Therefore, under the constant integration time, HMC needs $O(\kappa \log \frac{1}{\epsilon})$ iterations to reach error $W_2(\rho_K, \pi) \leq \epsilon$ , where $\kappa = \frac{L}{m}$ is condition number. Furthermore, they also show that the relaxation time of ideal HMC with a constant integration time is $\Omega(\kappa)$ for the Gaussian case.
118
+
119
+ # 2.3 CHEBYSHEV POLYNOMIALS
120
+
121
+ We denote $\Phi_K(\cdot)$ the degree- $K$ Chebyshev polynomial of the first kind, which is defined by:
122
+
123
+ $$
124
+ \Phi_{K}(x) = \left\{ \begin{array}{ll} \cos(K \arccos(x)) & \text{if } x \in [-1, 1], \\ \cosh(K \operatorname{arccosh}(x)) & \text{if } x > 1, \\ (-1)^{K} \cosh(K \operatorname{arccosh}(-x)) & \text{if } x < -1. \end{array} \right. \tag{6}
125
+ $$
126
+
127
+ Our proposed integration time is built on a scaled-and-shifted Chebyshev polynomial, defined as:
128
+
129
+ $$
130
+ \bar {\Phi} _ {K} (\lambda) := \frac {\Phi_ {K} (h (\lambda))}{\Phi_ {K} (h (0))}, \tag {7}
131
+ $$
132
+
133
+ where $h(\cdot)$ is the mapping $h(\lambda) \coloneqq \frac{L + m - 2\lambda}{L - m}$ . Observe that the mapping $h(\cdot)$ maps all $\lambda \in [m, L]$ into the interval $[-1, 1]$ . The roots of the degree- $K$ scaled-and-shifted Chebyshev polynomial $\bar{\Phi}_K(\lambda)$ are
134
+
135
+ $$
136
+ \text{(Chebyshev roots)} \quad r_{k}^{(K)} := \frac{L + m}{2} - \frac{L - m}{2} \cos\left(\frac{\left(k - \frac{1}{2}\right)\pi}{K}\right), \tag{8}
137
+ $$
138
+
139
+ where $k = 1,2,\dots ,K$ , i.e., $\bar{\Phi}_K(r_k^{(K)}) = 0$ . We now recall the following key result regarding the scaled-and-shifted Chebyshev polynomial $\bar{\Phi}_K$ .
140
+
141
+ Lemma 3. (e.g., Section 2.3 in d'Aspremont et al. (2021)) For any positive integer $K$ , we have
142
+
143
+ $$
144
+ \max _ {\lambda \in [ m, L ]} \left| \bar {\Phi} _ {K} (\lambda) \right| \leq 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K} = O \left(\left(1 - \Theta \left(\sqrt {\frac {m}{L}}\right)\right) ^ {K}\right). \tag {9}
145
+ $$
146
+
147
+ The proof of Lemma 3 is in Appendix B.
148
+
149
+ # 3 CHEBYSHEV INTEGRATION TIME
150
+
151
+ We are now ready to introduce our scheme of time-varying integration time. Let $K$ be the pre-specified total number of iterations of HMC. Our proposed method will first permute the array $[1,2,\dots ,K]$ before executing HMC for $K$ iterations. Denote $\sigma (k)$ the $k_{\mathrm{th}}$ element of the array $[1,2,\dots ,K]$ after an arbitrary permutation $\sigma$ . Then, we propose to set the integration time of HMC at iteration $k$ , i.e., set $\eta_k^{(K)}$ , as follows:
152
+
153
+ ![](images/667357e0438fbd864a547c7a7568a42bbece7f9ec9bd24689b986bb1ef89ff32.jpg)
154
+ Figure 1: Left: Set $K = 400$ , $m = 1$ and $L = 100$ . The green solid line (Chebyshev integration time (10)) on the subfigure represents $\max_{\lambda \in \{m, m + 0.1, \dots, L\}} \left| \Pi_{s=1}^{k} \cos \left( \sqrt{2\lambda} \eta_{s}^{(K)} \right) \right| = \left| \Pi_{s=1}^{k} \cos \left( \frac{\pi}{2} \sqrt{\frac{\lambda}{r_{\sigma(s)}^{(K)}}} \right) \right|$ v.s. $k$ , while the blue dash line (Constant integration time (5)) represents $\max_{\lambda \in \{m, m + 0.1, \dots, L\}} \left| \Pi_{s=1}^{k} \cos \left( \sqrt{2\lambda} \eta_{s}^{(K)} \right) \right| = \left| \Pi_{s=1}^{k} \cos \left( \frac{\pi}{2} \sqrt{\frac{\lambda}{L}} \right) \right|$ v.s. $k$ . Since the cosine product controls the convergence rate of the $W_{2}$ distance by Lemma 2, this confirms the acceleration via using the proposed scheme of Chebyshev integration over the constant integration time (Chen & Vempala, 2019; Vishnoi, 2021). Right: $\psi(x) = \frac{\cos\left(\frac{\pi}{2}\sqrt{x}\right)}{1 - x}$ v.s. $x$ .
155
+
156
+ ![](images/ddbd0caf79bd9cd59658af0ecb4e591cf41f38641b5536053f3ccab48edf7cc1.jpg)
157
+
158
+ $$
159
+ \text{(Chebyshev integration time)} \quad \eta_{k}^{(K)} = \frac{\pi}{2} \frac{1}{\sqrt{2 r_{\sigma(k)}^{(K)}}}. \tag{10}
160
+ $$
161
+
162
+ We note the usage of the permutation $\sigma$ is not needed in our analysis below; however, it seems to help improve performance in practice. Specifically, though the guarantees of HMC at the final iteration $K$ provided in Theorem 1 and Lemma 4 below are the same regardless of the permutation, the progress of HMC varies under different permutations of the integration time, which is why we recommend an arbitrary permutation of the integration time in practice.
163
+
164
+ Our main result is the following improved convergence rate of HMC under the Chebyshev integration time, for quadratic potentials.
165
+
166
+ Theorem 1. Denote the target distribution $\pi \propto \exp (-f(x)) = \mathcal{N}(0,\Lambda^{-1})$ , where $f(x)$ is defined on (3), and denote the condition number $\kappa \coloneqq \frac{L}{m}$ . Let $\rho_{K}$ be the distribution of $x_{K}$ generated by Algorithm 1 at the final iteration $K$ . Then, we have
167
+
168
+ $$
169
+ W _ {2} (\rho_ {K}, \pi) \leq 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K} W _ {2} (\rho_ {0}, \pi) = O \left(\left(1 - \Theta \left(\frac {1}{\sqrt {\kappa}}\right)\right) ^ {K}\right) W _ {2} (\rho_ {0}, \pi).
170
+ $$
171
+
172
+ Consequently, the total number of iterations $K$ such that the Wasserstein-2 distance satisfies $W_{2}(\rho_{K},\pi)\leq \epsilon$ is $O\left(\sqrt{\kappa}\log \frac{1}{\epsilon}\right)$ .
173
+
174
+ Theorem 1 shows an accelerated linear rate $1 - \Theta\left(\frac{1}{\sqrt{\kappa}}\right)$ using Chebyshev integration time, and hence improves the previous result of $1 - \Theta\left(\frac{1}{\kappa}\right)$ as discussed above. The proof of Theorem 1 relies on the following lemma, which upper-bounds the cosine products that appear in the bound of the $W_{2}$ distance in Lemma 2 by the scaled-and-shifted Chebyshev polynomial $\bar{\Phi}_K(\lambda)$ on (7).
175
+
176
+ Lemma 4. Denote $|P_K^{\mathrm{Cos}}(\lambda)| \coloneqq \left|\Pi_{k = 1}^{K}\cos \left(\frac{\pi}{2}\sqrt{\frac{\lambda}{r_{\sigma(k)}^{(K)}}}\right)\right|$ . Suppose $\lambda \in [m,L]$ . Then, we have for any positive integer $K$ ,
177
+
178
+ $$
179
+ \left| P _ {K} ^ {\operatorname {C o s}} (\lambda) \right| \leq \left| \bar {\Phi} _ {K} (\lambda) \right|. \tag {11}
180
+ $$
181
+
182
+ The proof of Lemma 4 is available in Appendix C. Figure 1 compares the cosine product $\max_{\lambda \in [m,L]}\left|\Pi_{s = 1}^{k}\cos \left(\sqrt{2\lambda}\eta_{s}^{(K)}\right)\right|$ in Lemma 2 of using the proposed integration time and that
183
+
184
+ Algorithm 2: HMC WITH CHEBYSHEV INTEGRATION TIME
185
+ 1: Given: a potential $f(\cdot)$ , where $\pi(x) \propto \exp(-f(x))$ and $f(\cdot)$ is $L$ -smooth and $m$ -strongly convex.
186
+ 2: Require: number of iterations $K$ and the step size of the leapfrog steps $\theta$ .
187
+ 3: Define $r_k^{(K)} := \frac{L + m}{2} - \frac{L - m}{2} \cos\left(\frac{(k - \frac{1}{2})\pi}{K}\right)$ , for $k = 1, \ldots, K$ .
188
+ 4: Arbitrarily permute the array $[1, 2, \ldots, K]$ . Denote $\sigma(k)$ the $k_{\mathrm{th}}$ element of the array after permutation.
189
+ 5: for $k = 1, 2, \ldots, K$ do
190
+ 6: Sample velocity $\xi_k \sim N(0, I_d)$ .
191
+ 7: Set integration time $\eta_k^{(K)} \gets \frac{\pi}{2} \frac{1}{\sqrt{2r_{\sigma(k)}^{(K)}}}$ .
192
+ 8: Set the number of leapfrog steps $S_k \gets \lfloor \frac{\eta_k^{(K)}}{\theta} \rfloor$ .
193
+ 9: $(\bar{x}_0, \bar{v}_0) \gets (x_{k-1}, \xi_k)$
194
+ % Leapfrog steps
195
+ 10: for $s = 0, 1, \ldots, S_k - 1$ do
196
+ 11: $\bar{v}_{s+\frac{1}{2}} = \bar{v}_s - \frac{\theta}{2} \nabla f(\bar{x}_s)$ ; $\bar{x}_{s+1} = \bar{x}_s + \theta \bar{v}_{s+\frac{1}{2}}$ ; $\bar{v}_{s+1} = \bar{v}_{s+\frac{1}{2}} - \frac{\theta}{2} \nabla f(\bar{x}_{s+1})$ ;
197
+ 12: end for
198
+ % Metropolis filter
199
+ 13: Compute the acceptance ratio $\alpha_k = \min\left(1, \frac{\exp(-H(\bar{x}_{S_k}, \bar{v}_{S_k}))}{\exp(-H(\bar{x}_0, \bar{v}_0))}\right)$ .
200
+ 14: Draw $\zeta \sim \text{Uniform}[0, 1]$ .
201
+ 15: If $\zeta < \alpha_k$ then
202
+ 16: $x_k \gets \bar{x}_{S_k}$
203
+ 17: Else
204
+ 18: $x_k \gets x_{k-1}$ .
205
+ 19: end for
206
+
207
+ of using the constant integration time, which illustrates acceleration via the proposed Chebyshev integration time.
208
+
209
+ We now provide the proof of Theorem 1.
210
+
211
+ Proof. (of Theorem 1) From Lemma 2, we have
212
+
213
+ $$
214
+ W _ {2} \left(\rho_ {K}, \pi\right) \leq \max _ {j \in [ d ]} \left| \Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right) \right| \cdot W _ {2} \left(\rho_ {0}, \pi\right). \tag {12}
215
+ $$
216
+
217
+ We can upper-bound the cosine product of any $j \in [d]$ as,
218
+
219
+ $$
220
+ \left| \Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right) \right| \stackrel {(a)} {=} \left| \Pi_ {k = 1} ^ {K} \cos \left(\frac {\pi}{2} \sqrt {\frac {\lambda_ {j}}{r _ {\sigma (k)} ^ {(K)}}}\right) \right| \stackrel {(b)} {\leq} \left| \bar {\Phi} _ {K} (\lambda_ {j}) \right| \stackrel {(c)} {\leq} 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K}, \tag {13}
221
+ $$
222
+
223
+ where (a) is due to the use of Chebyshev integration time (10), (b) is by Lemma 4, and (c) is by Lemma 3. Combining (12) and (13) leads to the result.
224
+
225
+ HMC with Chebyshev Integration Time for General Distributions To sample from general strongly log-concave distributions, we propose Algorithm 2, which adopts the Verlet integrator (a.k.a. the leapfrog integrator) to simulate the Hamiltonian flow $\mathrm{HMC}_{\eta}(\cdot ,\xi)$ and uses Metropolis filter to correct the bias. It is noted that the number of leapfrog steps $S_{k}$ in each iteration $k$ is equal to the integration time $\eta_k^{(K)}$ divided by the step size $\theta$ used in the leapfrog steps. More precisely, we have $S_{k} = \left\lfloor \frac{\eta_{k}^{(K)}}{\theta}\right\rfloor$ in iteration $k$ of HMC.
226
+
227
+ # 4 EXPERIMENTS
228
+
229
+ We now evaluate HMC with the proposed Chebyshev integration time (Algorithm 2) and HMC with the constant integration time (Algorithm 2 with line 7 replaced by the constant integration time (5)) in several tasks. For all the tasks in the experiments, the total number of iterations of HMCs is set to be $K = 10,000$ , and hence we collect $K = 10,000$ samples along the trajectory. For the step size $\theta$ in the leapfrog steps, we let $\theta \in \{0.001, 0.005, 0.01, 0.05\}$ . To evaluate the methods, we
230
+
231
+ Table 1: Ideal HMC with $K = 10{,}000$ iterations for sampling from a Gaussian $\mathcal{N}(\mu, \Sigma)$ , where $\mu = \begin{bmatrix} 0 \\ 0 \end{bmatrix}$ and $\Sigma = \begin{bmatrix} 1 & 0 \\ 0 & 100 \end{bmatrix}$ . Here, Cheby. (W/) is ideal HMC with an arbitrary permutation of the Chebyshev integration time, while Cheby. (W/O) is ideal HMC without a permutation; and Const. refers to using the constant integration time (5).
232
+
233
+ <table><tr><td>Method</td><td>Mean ESS</td><td>Min ESS</td></tr><tr><td>Cheby. (W/)</td><td>10399.00811 ± 347.25021</td><td>7172.50338 ± 257.21244</td></tr><tr><td>Cheby. (W/O)</td><td>10197.09964 ± 276.94894</td><td>7043.55293 ± 284.78037</td></tr><tr><td>Const.</td><td>7692.00382 ± 207.19628</td><td>5533.26519 ± 213.31943</td></tr></table>
234
+
235
+ compute effective sample size (ESS), which is a common performance metric of HMCs (Girolami & Calderhead, 2011; Brofos & Lederman, 2021; Hirt et al., 2021; Riou-Durand & Vogrinc, 2022; Hoffman et al., 2021; Hoffman & Gelman, 2014; Steeg & Galstyan, 2021), by using the toolkit ArViz (Kumar et al., 2019). The ESS of a sequence of $N$ dependent samples is computed based on the autocorrelations within the sequence at different lags: $\mathrm{ESS} \coloneqq N / (1 + 2\sum_{k}\gamma (k))$ , where $\gamma (k)$ is an estimate of the autocorrelation at lag $k$ . We consider 4 metrics, which are (1) Mean ESS: the average of ESS of all variables. That is, ESS is computed for each variable/dimension, and Mean ESS is the average of them. (2) Min ESS: the lowest value of ESS among the ESSs of all variables; (3) Mean ESS/Sec.: Mean ESS normalized by the CPU time in seconds; (4) Min ESS/Sec.: Minimum ESS normalized by the CPU time in seconds. In the following tables, we denote "Cheby." as our proposed method, and "Const." as HMC with the constant integration time (Vishnoi, 2021; Chen & Vempala, 2019). Each of the configurations is repeated 10 times, and we report the average and the standard deviation of the results. We also report the acceptance rate of the Metropolis filter (Acc. Prob) on the tables. Our implementation of the experiments is done by modifying a publicly available code of HMCs by Brofos & Lederman (2021). Code for our experiments can be found in the supplementary.
236
+
237
+ # 4.1 IDEAL HMC FLOW FOR SAMPLING FROM A GAUSSIAN WITH A DIAGONAL COVARIANCE
238
+
239
+ Before evaluating the empirical performance of Algorithm 2 in the following subsections, here we discuss and compare the use of an arbitrary permutation of the Chebyshev integration time and that without permutation (as well as that of using a constant integration time). We simulate ideal HMC for sampling from a Gaussian $\mathcal{N}(\mu, \Sigma)$ , where $\mu = \begin{bmatrix} 0 \\ 0 \end{bmatrix}$ and $\Sigma = \begin{bmatrix} 1 & 0 \\ 0 & 100 \end{bmatrix}$ . It is noted that ideal HMC flow for this case has a closed-form solution as (4) shows. The results are reported in Table 1.
240
+
241
+ From the table, the use of a Chebyshev integration time allows to obtain a larger ESS than that from using a constant integration time, and an arbitrary permutation helps get a better result. An explanation is that the ESS is a quantity that is computed along the trajectory of a chain, and therefore a permutation of the integration time could make a difference. We remark that the observation here (an arbitrary permutation of time generates a larger ESS) does not contradict Theorem 1, since Theorem 1 is about the guarantee in $W_{2}$ distance at the last iteration $K$ .
242
+
243
+ # 4.2 SAMPLING FROM A GAUSSIAN
244
+
245
+ We sample $\mathcal{N}(\mu, \Sigma)$ , where $\mu = \begin{bmatrix} 0 \\ 1 \end{bmatrix}$ and $\Sigma = \begin{bmatrix} 1 & 0.5 \\ 0.5 & 100 \end{bmatrix}$ . Therefore, the strong convexity constant $m$ is approximately 0.01 and the smoothness constant $L$ is approximately 1. Table 2 shows the results. HMC with Chebyshev integration time consistently outperforms that of using the constant integration time in terms of all the metrics: Mean ESS, Min ESS, Mean ESS/Sec, and Min ESS/Sec.
246
+
247
+ We also plot two quantities throughout the iterations of HMCs on Figure 2. Specifically, Sub-figure (a) on Figure 2 plots the size of the difference between the targeted covariance $\Sigma$ and an estimated covariance $\hat{\Sigma}_k$ at each iteration $k$ of HMC, where $\hat{\Sigma}_k$ is the sample covariance of 10,000 samples collected from a number of 10,000 HMC chains at their $k_{\mathrm{th}}$ iteration. Sub-figure (b) plots a discrete TV distance that is computed as follows. We use a built-in function of Numpy to sample 10,000 samples from the target distribution, while we also have 10,000 samples collected from a number
248
+
249
+ Table 2: Sampling from a Gaussian distribution. We report 4 metrics regarding ESS (the higher the better), please see the main text for their definitions.
250
+
251
+ <table><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. Prob</td></tr><tr><td>0.001</td><td>Cheby.</td><td>5187.28 ± 261.13</td><td>307.09 ± 21.92</td><td>20.28 ± 1.74</td><td>1.20 ± 0.11</td><td>1.00 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>1912.76 ± 72.10</td><td>39.87 ± 13.77</td><td>15.87 ± 0.89</td><td>0.33 ± 0.11</td><td>1.00 ± 0.00</td></tr><tr><td>0.005</td><td>Cheby.</td><td>5146.71 ± 257.65</td><td>304.126 ± 19.09</td><td>97.84 ± 9.23</td><td>5.79 ± 0.68</td><td>1.00 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>1926.71 ± 136.53</td><td>32.83 ± 9.57</td><td>80.31 ± 4.39</td><td>1.37 ± 0.39</td><td>1.00 ± 0.00</td></tr><tr><td>0.01</td><td>Cheby.</td><td>5127.90 ± 211.46</td><td>279.59 ± 38.09</td><td>184.26 ± 20.99</td><td>10.01 ± 1.52</td><td>1.00 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>1832.87 ± 77.47</td><td>35.71 ± 11.74</td><td>147.53 ± 12.59</td><td>2.85 ± 0.95</td><td>1.00 ± 0.00</td></tr><tr><td>0.05</td><td>Cheby.</td><td>5133.67 ± 195.07</td><td>316.87 ± 36.27</td><td>871.72 ± 88.73</td><td>53.54 ± 6.22</td><td>0.99 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>1849.15 ± 92.75</td><td>34.98 ± 14.70</td><td>615.73 ± 30.16</td><td>11.70 ± 5.07</td><td>0.99 ± 0.00</td></tr><tr><td>0.1</td><td>Cheby.</td><td>4948.46 ± 144.03</td><td>281.66 ± 44.79</td><td>1492.96 ± 166.21</td><td>84.39 ± 13.04</td><td>0.99 ± 0.00</td></tr><tr><td>0.1</td><td>Const.</td><td>1852.79 ± 132.95</td><td>38.17 ± 16.35</td><td>1035.54 ± 82.34</td><td>21.44 ± 9.51</td><td>0.99 ± 0.00</td></tr></table>
252
+
253
+ ![](images/b2b9487f0192171e6f33f883042431aaa157a15b0ee649af64ef6a94d8b1df93.jpg)
254
+ (a) $\| \Sigma -\hat{\Sigma}_k\| _F$ v.s. iteration $k$
255
+
256
+ ![](images/e6e3f26a7d8dd0ae306b3d8b654d72db5a80ff7ac11c8292094bdf25993d936a.jpg)
257
+ (b) discrete $\mathrm{TV}(\hat{\pi},\hat{\rho}_k)$ v.s. iteration $k$
258
+ Figure 2: Sampling from a Gaussian distribution. Both lines correspond to HMCs with the same step size $h = 0.05$ used in the leapfrog steps (but with different schemes of the integration time). Please see the main text for the precise definitions of the quantities and the details of how we compute them.
259
+
260
+ of 10,000 HMC chains at each iteration $k$ . Using these two sets of samples, we construct two histograms with 30 bins for each dimension, and we denote them as $\hat{\pi}$ and $\hat{\rho}_k$ . The discrete TV( $\hat{\pi},\hat{\rho}_k$ ) at iteration $k$ is 0.5 times the sum of the absolute values of the differences between the counts of all the pairs of corresponding bins, divided by 10,000, which serves as a surrogate of the Wasserstein-2 distance between the true target $\pi$ and $\rho_{k}$ from HMC, since computing or estimating the true Wasserstein distance is challenging.
261
+
262
+ # 4.3 SAMPLING FROM A MIXTURE OF TWO GAUSSIANS
263
+
264
+ For a vector $a \in \mathbb{R}^d$ and a positive definite matrix $\Sigma \in \mathbb{R}^{d \times d}$ , we consider sampling from a mixture of two Gaussians $\mathcal{N}(a, \Sigma)$ and $\mathcal{N}(-a, \Sigma)$ with equal weights. Denote $b := \Sigma^{-1}a$ and $\Lambda := \Sigma^{-1}$ . The potential is $f(x) = \frac{1}{2} \|x - a\|_{\Lambda}^2 - \log(1 + \exp(-2x^\top b))$ , and its gradient is $\nabla f(x) = \Lambda x - b + 2b(1 + \exp(2x^\top b))^{-1}$ . For each dimension $i \in [d]$ , we set $a[i] = \frac{\sqrt{i}}{2d}$ and set the covariance $\Sigma = \mathrm{diag}_{1 \leq i \leq d}(\frac{i}{d})$ . The potential is strongly convex if $a^\top \Sigma^{-1}a < 1$ , see e.g., Riou-Durand & Vogrinc (2022). We set $d = 10$ in the experiment, and simply use the smallest and the largest eigenvalue of $\Lambda$ to approximate the strong convexity constant $m$ and the smoothness constant $L$ of the potential, which are $\hat{m} = 1$ and $\hat{L} = 10$ in this case. Table 3 shows that the proposed method generates a larger effective sample size than the baseline.
265
+
266
+ # 4.4 BAYESIAN LOGISTIC REGRESSION
267
+
268
+ We also consider Bayesian logistic regression to evaluate the methods. Given an observation $(z_i, y_i)$ , where $z_i \in \mathbb{R}^d$ and $y_i \in \{0, 1\}$ , the likelihood function is modeled as $p(y_i | z_i, w) = \frac{1}{1 + \exp(-y_i z_i^\top w)}$ . Moreover, the prior on the model parameter $w$ is assumed to follow a Gaussian distribution, $p(w) = N(0, \alpha^{-1} I_d)$ , where $\alpha > 0$ is a parameter. The goal is to sample $w \in \mathbb{R}^d$ from the posterior, $p(w | \{z_i, y_i\}_{i=1}^n) \propto p(w) \Pi_{i=1}^n p(y_i | z_i, w)$ , where $n$ is the number of data points in a dataset. The potential function $f(w)$ can be written as
269
+
270
+ $$
271
+ f(w) = \sum_{i=1}^{n} f_i(w), \quad \text{where } f_i(w) = \log\left(1 + \exp\left(-y_i w^{\top} z_i\right)\right) + \alpha \frac{\|w\|^2}{2n}. \tag{14}
272
+ $$
273
+
274
+ Table 3: Sampling from a mixture of two Gaussians
275
+
276
+ <table><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. Prob</td></tr><tr><td>0.001</td><td>Cheby.</td><td>2439.86 ± 71.83</td><td>815.20 ± 83.82</td><td>22.68 ± 0.93</td><td>7.57 ± 0.81</td><td>0.89 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>845.44 ± 31.42</td><td>261.14 ± 34.34</td><td>12.90 ± 0.52</td><td>3.98 ± 0.53</td><td>0.91 ± 0.00</td></tr><tr><td>0.005</td><td>Cheby.</td><td>2399.50 ± 100.12</td><td>784.06 ± 82.07</td><td>105.97 ± 8.78</td><td>34.58 ± 4.12</td><td>0.89 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>876.61 ± 25.62</td><td>277.72 ± 30.62</td><td>63.80 ± 4.67</td><td>20.22 ± 2.62</td><td>0.91 ± 0.00</td></tr><tr><td>0.01</td><td>Cheby.</td><td>2341.35 ± 89.99</td><td>794.27 ± 48.75</td><td>194.81 ± 23.51</td><td>66.30 ± 9.89</td><td>0.88 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>860.61 ± 20.39</td><td>235.33 ± 33.73</td><td>110.62 ± 14.09</td><td>30.40 ± 6.34</td><td>0.91 ± 0.00</td></tr><tr><td>0.05</td><td>Cheby.</td><td>2214.19 ± 87.27</td><td>748.66 ± 46.18</td><td>761.59 ± 68.88</td><td>256.51 ± 13.76</td><td>0.89 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>853.40 ± 41.05</td><td>265.70 ± 37.41</td><td>376.54 ± 67.83</td><td>116.45 ± 22.23</td><td>0.91 ± 0.00</td></tr><tr><td>0.1</td><td>Cheby.</td><td>2064.42 ± 67.44</td><td>657.45 ± 60.44</td><td>1162.67 ± 84.19</td><td>370.07 ± 41.11</td><td>0.90 ± 0.00</td></tr><tr><td>0.1</td><td>Const.</td><td>632.70 ± 22.78</td><td>182.88 ± 37.10</td><td>450.53 ± 93.60</td><td>132.58 ± 43.91</td><td>0.92 ± 0.00</td></tr></table>
277
+
278
+ We set $\alpha = 1$ in the experiments. We consider three datasets: Heart, Breast Cancer, and Diabetes binary classification datasets, which are all publicly available online. To approximate the strong convexity constant $m$ and the smoothness constant $L$ of the potential $f(w)$ , we compute the smallest eigenvalue and the largest eigenvalue of the Hessian $\nabla^2 f(w)$ at the maximizer of the posterior, and we use them as estimates of $m$ and $L$ respectively. We apply Newton's method to approximately find the maximizer of the posterior. The experimental results are reported on Table 4 in Appendix E.1 due to the space limit, which show that our method consistently outperforms the baseline.
279
+
280
+ # 4.5 SAMPLING FROM A HARD DISTRIBUTION
281
+
282
+ We also consider sampling from a step-size-dependent distribution $\pi (x)\propto \exp (-f_h(x))$ , where the potential $f_{h}(\cdot)$ is $\kappa$ -smooth and 1-strongly convex. The distribution is considered in Lee et al. (2021a) for showing a lower bound regarding certain Metropolized sampling methods using a constant integration time and a constant step size $h$ of the leapfrog integrator. More concretely, the potential is
283
+
284
+ $$
285
+ f_h(x) := \sum_{i=1}^{d} f_i^{(h)}\left(x_i\right), \quad \text{where } f_i^{(h)}\left(x_i\right) = \left\{ \begin{array}{ll} \frac{1}{2} x_i^2, & i = 1 \\ \frac{\kappa}{3} x_i^2 - \frac{\kappa h}{3} \cos\left(\frac{x_i}{\sqrt{h}}\right), & 2 \leq i \leq d. \end{array} \right. \tag{15}
286
+ $$
287
+
288
+ In the experiment, we set $\kappa = 50$ and $d = 10$ . The results are reported on Table 5 in Appendix E.2. The scheme of the Chebyshev integration time is still better than the constant integration time for this task.
289
+
290
+ # 5 DISCUSSION AND OUTLOOK
291
+
292
+ The Chebyshev integration time shows promising empirical results for sampling from a variety of strongly log-concave distributions. On the other hand, the theoretical guarantee of acceleration that we provide in this work is only for strongly convex quadratic potentials. Therefore, a direction left open by our work is establishing some provable acceleration guarantees for general strongly log-concave distributions. However, unlike quadratic potentials, the output (position, velocity) of an HMC flow does not have a closed-form solution in general, which makes the analysis much more challenging. A starting point might be improving the analysis of Chen & Vempala (2019), where a contraction bound of two HMC chains under a small integration time $\eta = O\left(\frac{1}{\sqrt{L}}\right)$ is shown. Since the scheme of the Chebyshev integration time requires a large integration time $\eta = \Theta\left(\frac{1}{\sqrt{m}}\right)$ at some iterations of HMC, a natural question is whether a variant of the result of Chen & Vempala (2019) can be extended to a large integration time $\eta = \Theta\left(\frac{1}{\sqrt{m}}\right)$ . We state as an open question: can ideal HMC with a scheme of time-varying integration time achieve an accelerated rate $O(\sqrt{\kappa}\log \frac{1}{\epsilon})$ for general smooth strongly log-concave distributions?
293
+
294
+ The topic of accelerating HMC with provable guarantees is underexplored, and we hope our work can facilitate the progress in this field. After the preprint of this work was available on arXiv, Jiang (2022) proposes a randomized integration time with partial velocity refreshment and provably shows that ideal HMC with the proposed machinery has the accelerated rate for sampling from a Gaussian distribution. Exploring any connections between the scheme of Jiang (2022) and ours can be an interesting direction.
295
+
296
+ # ACKNOWLEDGMENTS
297
+
298
+ We thank the reviewers for constructive feedback, which helps improve the presentation of this paper.
299
+
300
+ # REFERENCES
301
+
302
+ Naman Agarwal, Surbhi Goel, and Cyril Zhang. Acceleration via fractal learning rate schedules. ICML, 2021.
303
+ Michael Betancourt. A conceptual introduction to Hamiltonian Monte Carlo. arXiv:1701.02434, 2017.
304
+ Nawaf Bou-Rabee and Andreas Eberle. Mixing time guarantees for unadjusted Hamiltonian Monte Carlo. arXiv:2105.00887, 2021.
305
+ Nawaf Bou-Rabee and Jesus Maria Sanz-Serna. Randomized Hamiltonian Monte Carlo. Annals of Applied Probability, 2017.
306
+ James A. Brofos and Roy R. Lederman. Evaluating the implicit midpoint integrator for Riemannian manifold Hamiltonian Monte Carlo. ICML, 2021.
307
+ Tianqi Chen, Emily B. Fox, and Carlos Guestrin. Stochastic gradient Hamiltonian Monte Carlo. ICML, 2014.
308
+ Yuansi Chen, Raaz Dwivedi, Martin J. Wainwright, and Bin Yu. Fast mixing of Metropolized Hamiltonian Monte Carlo: Benefits of multi-step gradients. JMLR, 2020.
309
+ Zongchen Chen and Santosh S Vempala. Optimal convergence rate of Hamiltonian Monte Carlo for strongly logconcave distributions. International Conference on Randomization and Computation (RANDOM), 2019.
310
+ Sinho Chewi, Thibaut Le Gouic, Chen Lu, Tyler Maunu, Philippe Rigollet, and Austin J. Stromme. Exponential ergodicity of mirror-langevin diffusions. NeurIPS, 2020.
311
+ Arnak S. Dalalyan. Theoretical guarantees for approximate sampling from a smooth and log-concave density. Journal of the Royal Statistical Society: Series B, 2017.
312
+ Arnak S. Dalalyan and Lionel Riou-Durand. On sampling from a log-concave density using kinetic Langevin diffusions. Bernoulli, 2020.
313
+ Alexandre d'Aspremont, Damien Scieur, and Adrien Taylor. Acceleration methods. Foundations and Trends in Optimization, 2021.
314
+ Simon Duane, A. D. Kennedy, Brian J. Pendleton, and Duncan Roweth. Hybrid monte carlo. Physics Letters B, 1987.
315
+ John C. Duchi, Peter L. Bartlett, and Martin J. Wainwright. Randomized smoothing for stochastic optimization. SIAM Journal on Optimization, 2012.
316
+ Alain Durmus, Eric Moulines, and Eero Saksman. On the convergence of Hamiltonian Monte Carlo. arXiv:1705.00166, 2017.
317
+ Alain Durmus, Szymon Majewski, and Błażej Miasojedow. Analysis of Langevin Monte Carlo via convex optimization. JMLR, 2019.
318
+ Abraham D. Flaxman, Adam Tauman Kalai, and H. Brendan McMahan. Online convex optimization in the bandit setting: gradient descent without a gradient. SODA, 2005.
319
+ Mark Girolami and Ben Calderhead. Riemann manifold Langevin and Hamiltonian Monte Carlo methods. Journal of the Royal Statistical Society, 2011.
320
+ Marcel Hirt, Michalis K. Titsias, and Petros Dellaportas. Entropy-based adaptive Hamiltonian Monte Carlo. NeurIPS, 2021.
321
+
322
+ Matthew D. Hoffman and Andrew Gelman. The No-U-Turn sampler: Adaptively setting path lengths in Hamiltonian Monte Carlo. JMLR, 2014.
323
+ Matthew D. Hoffman, Alexey Radul, and Pavel Sountsov. An adaptive-MCMC scheme for setting trajectory lengths in Hamiltonian Monte Carlo. AISTATS, 2021.
324
+ Qijia Jiang. On the dissipation of ideal hamiltonian monte carlo sampler. arXiv:2209.07438, 2022.
325
+ Chi Jin, Rong Ge, Praneeth Netrapalli, Sham M. Kakade, and Michael I. Jordan. How to escape saddle points efficiently. ICML, 2017.
326
+ Daphne Koller and Nir Friedman. Probabilistic graphical models: Principles and techniques. MIT Press, 2009.
327
+ Ravin Kumar, Colin Carroll, Ari Hartikainen, and Osvaldo Martin. ArviZ a unified library for exploratory analysis of Bayesian models in Python. The Journal of Open Source Software, 2019.
328
+ Yin Tat Lee, Ruoqi Shen, and Kevin Tian. Lower bounds on Metropolized sampling methods for well-conditioned distributions. NeurIPS, 2021a.
329
+ Yin Tat Lee, Ruoqi Shen, and Kevin Tian. Structured logconcave sampling with a restricted gaussian oracle. $COLT$ , 2021b.
330
+ Daniel Levy, Matthew D. Hoffman, and Jascha Sohl-Dickstein. Generalizing Hamiltonian Monte Carlo with neural networks. *ICLR*, 2018.
331
+ Ruilin Li, Hongyuan Zha, and Molei Tao. *Sqrt(d)* dimension dependence of Langevin Monte Carlo. *ICLR*, 2022.
332
+ Yi-An Ma, Niladri S. Chatterji, Xiang Cheng, Nicolas Flammarion, Peter L. Bartlett, and Michael I. Jordan. Is there an analog of Nesterov acceleration for MCMC? Bernoulli, 2021.
333
+ Oren Mangoubi and Aaron Smith. Mixing of Hamiltonian Monte Carlo on strongly logconcave distributions 2: Numerical integrators. AISTATS, 2019.
334
+ Oren Mangoubi and Aaron Smith. Mixing of Hamiltonian Monte Carlo on strongly logconcave distributions 1: Continuous dynamics. Annals of Applied Probability, 2021.
335
+ Oren Mangoubi and Nisheeth K. Vishnoi. Dimensionally tight bounds for second-order Hamiltonian Monte Carlo. NeurIPS, 2018.
336
+ Radford M. Neal. MCMC using Hamiltonian dynamics. arXiv:1206.1901, 2012.
337
+ Yuri Nesterov. Introductory lectures on convex optimization: a basic course. Springer, 2013.
338
+ Fabian Pedregosa. Acceleration without momentum, 2021. URL http://fa.bianp.net/blog/2021/no-momentum/.
339
+ B.T. Polyak. Some methods of speeding up the convergence of iteration methods. USSR Computational Mathematics and Mathematical Physics, 1964.
340
+ Lionel Riou-Durand and Jure Vogrinc. Metropolis Adjusted Langevin trajectories: a robust alternative to Hamiltonian Monte Carlo. arXiv:2202.13230, 2022.
341
+ Ruslan Salakhutdinov and Andriy Mnih. Bayesian probabilistic matrix factorization using Markov chain Monte Carlo. ICML, 2008.
342
+ Greg Ver Steeg and Aram Galstyan. Hamiltonian dynamics with non-newtonian momentum for rapid sampling. NeurIPS, 2021.
343
+ Nilesh Tripuraneni, Mark Rowland, Zoubin Ghahramani, and Richard Turner. Magnetic Hamiltonian Monte Carlo. ICML, 2017.
344
+ Santosh S. Vempala and Andre Wibisono. Rapid convergence of the Unadjusted Langevin Algorithm: Isoperimetry suffices. NeurIPS, 2019.
345
+
346
+ Nisheeth K. Vishnoi. An introduction to Hamiltonian Monte Carlo method for sampling. arXiv:2108.12107, 2021.
347
+ Jun-Kun Wang, Chi-Heng Lin, and Jacob Abernethy. Escaping saddle points faster with stochastic momentum. *ICLR*, 2020.
348
+ Jun-Kun Wang, Chi-Heng Lin, and Jacob Abernethy. A modular analysis of provable acceleration via Polyak's momentum: Training a wide ReLU network and a deep linear network. ICML, 2021.
349
+ Jun-Kun Wang, Chi-Heng Lin, Andre Wibisono, and Bin Hu. Provable Acceleration of Heavy Ball beyond Quadratics for a Class of Polyak-Łojasiewicz Functions when the Non-Convexity is Averaged-Out. ICML, 2022.
350
+ Max Welling and Yee Whye Teh. Bayesian learning via Stochastic Gradient Langevin dynamics. ICML, 2011.
351
+ Florian Wenzel, Kevin Roth, Bastiaan S. Veeling, Jakub Swiatkowski, Linh Tran, Stephan Mandt, Jasper Snoek, Tim Salimans, Rodolphe Jenatton, and Sebastian Nowozin. How good is the Bayes posterior in deep neural networks really. ICML, 2020.
352
+ Difan Zou and Quanquan Gu. On the convergence of Hamiltonian Monte Carlo with stochastic gradients. ICML, 2021.
353
+
354
+ # A A CONNECTION BETWEEN OPTIMIZATION AND SAMPLING
355
+
356
+ To provide an intuition of why the technique of Chebyshev polynomials can help accelerate HMC for the case of the strongly convex quadratic potentials, we would like to describe the work of gradient descent with the Chebyshev step sizes Agarwal et al. (2021) in more detail, because we are going to draw a connection between optimization and sampling to showcase the intuition. Agarwal et al. (2021) provably show that gradient descent with a scheme of step sizes based on the Chebyshev Polynomials has an accelerated rate for minimizing strongly convex quadratic functions compared to GD with a constant step size, and their experiments show some promising results for minimizing smooth strongly convex functions beyond quadratics via the proposed scheme of step sizes. More precisely, define $f(w) = \frac{1}{2} w^{\top}Aw$ , where $A\in \mathbb{R}^{d\times d}$ is a positive definite matrix which has eigenvalues $L\coloneqq \lambda_1\geq \lambda_2\geq \dots \geq \lambda_d\eqqcolon m$ . Agarwal et al. (2021) consider applying gradient descent
357
+
358
+ $$
359
+ w _ {k + 1} = w _ {k} - \eta_ {k} \nabla f (w _ {k})
360
+ $$
361
+
362
+ to minimize $f(\cdot)$ , where $\eta_{k}$ is the step size of gradient descent at iteration $k$ . Let $w_{*}$ be the unique global minimizer of $f(\cdot)$ . It is easy to show that the dynamic of the distance evolves as
363
+
364
+ $$
365
+ w _ {k + 1} - w _ {*} = \left(I _ {d} - \eta_ {k} A\right) \left(I _ {d} - \eta_ {k - 1} A\right) \dots \left(I _ {d} - \eta_ {1} A\right) \left(w _ {1} - w _ {*}\right).
366
+ $$
367
+
368
+ Hence, the size of the distance to $w_{*}$ at iteration $K + 1$ is bounded by
369
+
370
+ $$
371
+ \| w _ {K + 1} - w _ {*} \| \leq \max _ {j \in [ d ]} | \prod_ {k = 1} ^ {K} (1 - \eta_ {k} \lambda_ {j}) | \| w _ {1} - w _ {*} \|.
372
+ $$
373
+
374
+ This shows that the convergence rate of GD is governed by $\max_{j\in [d]}\left|\prod_{k = 1}^{K}(1 - \eta_k\lambda_j)\right|$ . By setting $\eta_{k}$ as the inverse of the Chebyshev root $r_k^{(K)}$ or any permuted root $r_{\sigma (k)}^{(K)}$ (see (8) for the definition), the polynomial $\prod_{k = 1}^{K}(1 - \eta_k\lambda)$ is actually the $K$ -degree scale-and-shifted polynomial, i.e., $\prod_{k = 1}^{K}(1 - \eta_k\lambda) = \prod_{k = 1}^{K}\left(1 - \frac{\lambda}{r_{\sigma(k)}^{(K)}}\right) = \bar{\Phi}_K(\lambda)$ (see (7) for the definition). It is well-known in the literature of optimization and numerical linear algebra that the $K$ -degree scale-and-shifted polynomial satisfies
375
+
376
+ $$
377
+ \max _ {\lambda \in [ m, L ]} \left| \bar {\Phi} _ {K} (\lambda) \right| \leq 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K} = O \left(\left(1 - \Theta \left(\sqrt {\frac {m}{L}}\right)\right) ^ {K}\right),
378
+ $$
379
+
380
+ which is restated in Lemma 3 and its proof is replicated in Appendix B of our paper for the reader's convenience. Applying this result, one gets a simple proof of the accelerated linear rate of GD with the proposed scheme of step sizes for minimizing quadratic functions. A nice blog article by Pedregosa (2021) explains this in detail.
381
+
382
+ Now we are ready to highlight its connection with HMC. In Lemma 1 of the paper, we restate a known result in HMC literature, where its proof is also replicated in Appendix B for the reader's convenience. The lemma indicates that the convergence rate of HMC is governed by $\max_{j\in [d]}\left|\prod_{k = 1}^{K}\cos (\sqrt{2\lambda_j}\eta_k^{(K)})\right|$ . By way of comparison to that of GD for minimizing quadratic functions, i.e., $\max_{j\in [d]}\left|\prod_{k = 1}^{K}(1 - \eta_k\lambda_j)\right|$ , it appears that they share some similarity, which made us wonder if we could bound the former by the latter. We show in Lemma 4 that $\left|\cos \left(\frac{\pi}{2}\sqrt{x}\right)\right|\leq |1 - x|$ , which holds for all $x\geq 0$ , and consequently,
383
+
384
+ $$
385
+ | P_K^{\mathrm{Cos}}(\lambda) | := \left| \prod_{k=1}^{K} \cos\left(\frac{\pi}{2} \sqrt{\frac{\lambda}{r_{\sigma(k)}^{(K)}}}\right) \right| \leq \left| \prod_{k=1}^{K} \left(1 - \frac{\lambda}{r_{\sigma(k)}^{(K)}}\right) \right| = | \bar{\Phi}_K(\lambda) |.
386
+ $$
387
+
388
+ The key lemma above implies that if we set the integration time as $\eta_k^{(K)} = \frac{\pi}{2}\frac{1}{\sqrt{2r_{\sigma(k)}^{(K)}}}$ , then we get acceleration of HMC.
389
+
390
+ # B PROOFS OF LEMMAS IN SECTION 2
391
+
392
+ We restate the lemmas for the reader's convenience.
393
+
394
+ Lemma 1. (Vishnoi, 2021) Let $x_0, y_0 \in \mathbb{R}^d$ . Consider the following coupling: $(x_t, v_t) = \mathrm{HMC}_t(x_0, \xi)$ and $(y_t, u_t) = \mathrm{HMC}_t(y_0, \xi)$ for some $\xi \in \mathbb{R}^d$ . Then for all $t \geq 0$ and for all $j \in [d]$ , it holds that
395
+
396
+ $$
397
+ x _ {t} [ j ] - y _ {t} [ j ] = \cos \left(\sqrt {2 \lambda_ {j}} t\right) \times \left(x _ {0} [ j ] - y _ {0} [ j ]\right).
398
+ $$
399
+
400
+ Proof. Given $(x_{t}, v_{t}) := \mathrm{HMC}_{t}(x_{0}, \xi)$ and $(y_{t}, u_{t}) := \mathrm{HMC}_{t}(y_{0}, \xi)$ , we have $\frac{dv_t}{dt} - \frac{du_t}{dt} = -\nabla f(x_t) + \nabla f(y_t) = 2\Lambda(y_t - x_t)$ . Therefore, we have $\frac{d^2(x_t[j] - y_t[j])}{dt^2} = -2\lambda_j(x_t[j] - y_t[j])$ for all $j \in [d]$ . Because of the initial condition $\frac{dx_0[j]}{dt} = \frac{dy_0[j]}{dt} = \xi[j]$ , the differential equation implies that $x_{t}[j] - y_{t}[j] = \cos(\sqrt{2\lambda_{j}} t) \times (x_{0}[j] - y_{0}[j])$ .
401
+
402
+ It is noted that the result also follows directly from the explicit solution (4).
403
+
404
+ Lemma 2. (Vishnoi, 2021) Let $\pi \propto \exp(-f) = \mathcal{N}(0, \Lambda^{-1})$ be the target distribution, where $f(x)$ is defined on (3). Let $\rho_{K}$ be the distribution of $x_{K}$ generated by Algorithm 1 at the final iteration $K$ . Then for any $\rho_{0}$ and any $K \geq 1$ , we have
405
+
406
+ $$
407
+ W _ {2} (\rho_ {K}, \pi) \leq \max _ {j \in [ d ]} \left| \Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right) \right| W _ {2} (\rho_ {0}, \pi).
408
+ $$
409
+
410
+ Proof. Starting from $x_0 \sim \rho_0$ , draw an initial point $y_0 \sim \pi$ such that $(x_0, y_0)$ has the optimal $W_2$ -coupling between $\rho_0$ and $\pi$ . Consider the following coupling at each iteration $k$ : $(x_k, v_k) = \mathrm{HMC}_{\eta_k^{(K)}}(x_{k-1}, \xi_k)$ and $(y_k, u_k) = \mathrm{HMC}_{\eta_k^{(K)}}(y_{k-1}, \xi_k)$ where $\xi_k \sim \mathcal{N}(0, I)$ is an independent Gaussian. We collect $\{x_k\}_{k=1}^K$ and $\{y_k\}_{k=1}^K$ from Algorithm 1. We know each $y_k \sim \pi$ , since $\pi$ is a
411
+
412
+ stationary distribution of the HMC Markov chain. Then by Lemma 1 we have
413
+
414
+ $$
415
+ \begin{array}{l} W _ {2} ^ {2} \left(\rho_ {K}, \pi\right) \leq \mathbb {E} \left[ \left\| x _ {K} - y _ {K} \right\| ^ {2} \right] \\ = \mathbb {E} [ \sum_ {j \in [ d ]} (x _ {K} [ j ] - y _ {K} [ j ]) ^ {2} ] \\ = \mathbb {E} \left[ \sum_ {j \in [ d ]} \left(\Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right) \times \left(x _ {0} [ j ] - y _ {0} [ j ]\right)\right) ^ {2} \right] \tag {16} \\ \leq \left(\max _ {j \in [ d ]} \left(\Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right)\right) ^ {2}\right) \mathbb {E} [ \sum_ {j \in [ d ]} (x _ {0} [ j ] - y _ {0} [ j ]) ^ {2} ] \\ = \left(\max _ {j \in [ d ]} \left(\Pi_ {k = 1} ^ {K} \cos \left(\sqrt {2 \lambda_ {j}} \eta_ {k} ^ {(K)}\right)\right) ^ {2}\right) W _ {2} ^ {2} (\rho_ {0}, \pi), \\ \end{array}
416
+ $$
417
+
418
+ Taking the square root on both sides leads to the result.
419
+
420
+ ![](images/0ba859f021dd8895d0483b44fa4892730b7903e40fbc2394c95b209714d318dc.jpg)
421
+
422
+ Lemma 3. (e.g., Section 2.3 in d'Aspremont et al. (2021)) For any positive integer $K$ , we have
423
+
424
+ $$
425
+ \left. \right. \max _ {\lambda \in [ m, L ]} \left| \bar {\Phi} _ {K} (\lambda) \right| \leq 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K} = O \left(\left(1 - \Theta \left(\sqrt {\frac {m}{L}}\right)\right) ^ {K}\right). \tag {17}
426
+ $$
427
+
428
+ Proof. Observe that the numerator of $\bar{\Phi}_K(\lambda) = \frac{\Phi_K(h(\lambda))}{\Phi_K(h(0))}$ satisfies $|\Phi_K(h(\lambda))| \leq 1$ , since $h(\lambda) \in [-1,1]$ for $\lambda \in [m,L]$ and that the Chebyshev polynomial satisfies $|\Phi_K(\cdot)| \leq 1$ when its argument is in $[-1,1]$ by the definition. It remains to bound the denominator, which is $\Phi_K(h(0)) = \cosh \left(K \operatorname{arccosh}\left(\frac{L + m}{L - m}\right)\right)$ . Since
429
+
430
+ $$
431
+ \operatorname{arccosh}\left(\frac{L+m}{L-m}\right) = \log\left(\frac{L+m}{L-m} + \sqrt{\left(\frac{L+m}{L-m}\right)^2 - 1}\right) = \log(\theta), \quad \text{where } \theta := \frac{\sqrt{L}+\sqrt{m}}{\sqrt{L}-\sqrt{m}},
432
+ $$
433
+
434
+ we have
435
+
436
+ $$
437
+ \Phi_K(h(0)) = \cosh\left(K \operatorname{arccosh}\left(\frac{L+m}{L-m}\right)\right) = \frac{\exp(K\log(\theta)) + \exp(-K\log(\theta))}{2} = \frac{\theta^K + \theta^{-K}}{2} \geq \frac{\theta^K}{2}.
438
+ $$
439
+
440
+ Combining the above inequalities, we obtain the desired result:
441
+
442
+ $$
443
+ \begin{array}{l} \max _ {\lambda \in [ m, L ]} \left| \bar {\Phi} _ {K} (\lambda) \right| = \max _ {\lambda \in [ m, L ]} \left| \frac {\Phi_ {K} (h (\lambda))}{\Phi_ {K} (h (0))} \right| \leq \frac {2}{\theta^ {K}} = 2 \left(1 - 2 \frac {\sqrt {m}}{\sqrt {L} + \sqrt {m}}\right) ^ {K} \\ = O \left(\left(1 - \Theta \left(\sqrt {\frac {m}{L}}\right)\right) ^ {K}\right). \\ \end{array}
444
+ $$
445
+
446
+ ![](images/f41e6e153d9611c48172597e8e81d6d45dff29496134f9ecc2f303dab58fa858.jpg)
447
+
448
+ # C PROOF OF LEMMA 4
449
+
450
+ Lemma 4. Denote $|P_K^{\mathrm{Cos}}(\lambda)| \coloneqq \left|\Pi_{k = 1}^{K}\cos \left(\frac{\pi}{2}\sqrt{\frac{\lambda}{r_{\sigma(k)}^{(K)}}}\right)\right|$ . Suppose $\lambda \in [m,L]$ . Then, we have for any positive integer $K$ ,
451
+
452
+ $$
453
+ \left| P_K^{\mathrm{Cos}}(\lambda) \right| \leq \left| \bar{\Phi}_K(\lambda) \right|. \tag{18}
454
+ $$
455
+
456
+ Proof. We use the fact that the $K$ -degree scaled-and-shifted Chebyshev Polynomial can be written as,
457
+
458
+ $$
459
+ \bar {\Phi} _ {K} (\lambda) = \Pi_ {k = 1} ^ {K} \left(1 - \frac {\lambda}{r _ {\sigma (k)} ^ {(K)}}\right), \tag {19}
460
+ $$
461
+
462
+ for any permutation $\sigma(\cdot)$ , since $\{r_{\sigma(k)}^{(K)}\}$ are its roots and $\bar{\Phi}_K(0) = 1$ . So inequality (18) is equivalent to
463
+
464
+ $$
465
+ \left| \Pi_ {k = 1} ^ {K} \cos \left(\frac {\pi}{2} \sqrt {\frac {\lambda}{r _ {\sigma (k)} ^ {(K)}}}\right) \right| \leq \left| \Pi_ {k = 1} ^ {K} \left(1 - \frac {\lambda}{r _ {\sigma (k)} ^ {(K)}}\right) \right|. \tag {20}
466
+ $$
467
+
468
+ To show (20), let us analyze the mapping $\psi(x) \coloneqq \frac{\cos\left(\frac{\pi}{2}\sqrt{x}\right)}{1 - x}$ for $x \geq 0, x \neq 1$ , with $\psi(1) = \frac{\pi}{4}$ by continuity, and show that $\max_{x : x \geq 0} |\psi(x)| \leq 1$ , as (20) would be immediate. We have $\psi'(x) = -\frac{\pi}{4\sqrt{x}} \frac{1}{1 - x} \sin\left(\frac{\pi}{2}\sqrt{x}\right) + \cos\left(\frac{\pi}{2}\sqrt{x}\right) \frac{1}{(1 - x)^2}$ . Hence, $\psi'(x) = 0$ when
469
+
470
+ $$
471
+ \tan \left(\frac {\pi}{2} \sqrt {x}\right) = \frac {4 \sqrt {x}}{\pi (1 - x)}. \tag {21}
472
+ $$
473
+
474
+ Denote an extreme point of $\psi(x)$ as $\hat{x}$ , which satisfies (21). Then, using (21), we have $|\psi(\hat{x})| = \left|\frac{\cos\left(\frac{\pi}{2}\sqrt{\hat{x}}\right)}{1 - \hat{x}}\right| = \left|\frac{\pi}{\sqrt{16\hat{x} + \pi^2(1 - \hat{x})^2}}\right|$ , where we used $\cos\left(\frac{\pi}{2}\sqrt{\hat{x}}\right) = \frac{\pi(1 - \hat{x})}{\sqrt{16\hat{x} + \pi^2(1 - \hat{x})^2}}$ or
475
+
476
+ $\frac{-\pi(1 - \hat{x})}{\sqrt{16\hat{x} + \pi^2(1 - \hat{x})^2}}$ . The denominator $\sqrt{16\hat{x} + \pi^2(1 - \hat{x})^2}$ has the smallest value at $\hat{x} = 0$ , which means that the largest value of $|\psi(x)|$ happens at $x = 0$ , which is 1. The proof is now completed.
477
+
478
+ ![](images/14fb0ac0df420a6a34595637fc92c4ca3e5ee879f9c52c2e2b223976d4f0a7a0.jpg)
479
+
480
+ # D A COMPARISON OF THE TOTAL INTEGRATION TIME (JIANG, 2022)
481
+
482
+ Since the Chebyshev integration times are set to be some large values at some steps of HMC, it is natural to ask if the number of steps to get an $\epsilon$ 2-Wasserstein distance is a fair metric. In this section, we consider the total integration time $\sum_{k=1}^{K} \eta_k^{(K)}$ to get an $\epsilon$ distance as another metric for the convergence. It is noted that the comparison between HMC with our integration time and HMC with the best constant integration time has been conducted by Jiang (2022), and our previous version did not have such a comparison. Below, we reproduce the comparison of Jiang (2022).
483
+
484
+ Recall the number of iterations to get an $\epsilon$ 2-Wasserstein distance to the target distribution is $K = O\left(\sqrt{\kappa}\log \left(\frac{1}{\epsilon}\right)\right)$ of HMC with the Chebyshev integration time (Theorem 1 in the paper). The average of the integration time is
485
+
486
+ $$
487
+ \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} = \frac {1}{K} \sum_ {k = 1} ^ {K} \frac {\pi}{2 \sqrt {2}} \frac {1}{\sqrt {r _ {\sigma (k)} ^ {(K)}}} = \frac {1}{K} \sum_ {k = 1} ^ {K} \frac {\pi}{2 \sqrt {2}} \frac {1}{\sqrt {r _ {k} ^ {(K)}}},
488
+ $$
489
+
490
+ where we recall that a permutation $\sigma(\cdot)$ does not affect the average.
491
+
492
+ Then, if $K$ is even, we can rewrite the averaged integration time as
493
+
494
+ $$
495
+ \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} = \frac {1}{K} \frac {\pi}{2 \sqrt {2}} \sum_ {k = 1} ^ {K / 2} \left(\frac {1}{\sqrt {r _ {k} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - k} ^ {(K)}}}\right).
496
+ $$
497
+
498
+ Otherwise, $K$ is odd, and we can rewrite the averaged integration time as
499
+
500
+ $$
501
+ \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} = \frac {1}{K} \frac {\pi}{2 \sqrt {2}} \left(\frac {1}{\sqrt {r _ {(K + 1) / 2} ^ {(K)}}} + \sum_ {k = 1} ^ {(K - 1) / 2} \left(\frac {1}{\sqrt {r _ {k} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - k} ^ {(K)}}}\right)\right).
502
+ $$
503
+
504
+ We will show
505
+
506
+ $$
507
+ \frac {1}{\sqrt {r _ {k} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - k} ^ {(K)}}} \leq \frac {1}{\sqrt {r _ {\lfloor K / 2 \rfloor} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K - \lfloor K / 2 \rfloor + 1} ^ {(K)}}},
508
+ $$
509
+
510
+ for any $k \in \{1,2,\dots ,\lfloor \frac{K}{2}\rfloor \}$ shortly below. Given this, we can further upper-bound the averaged integration time as
511
+
512
+ $$
513
+ \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} \leq \frac {\pi}{4 \sqrt {2}} \left(\frac {1}{\sqrt {r _ {\lfloor K / 2 \rfloor} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K - \lfloor K / 2 \rfloor + 1} ^ {(K)}}}\right),
514
+ $$
515
+
516
+ when $K$ is even; when $K$ is odd, we can upper-bound the averaged integration time as
517
+
518
+ $$
519
+ \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} \leq \frac {1}{K} \frac {\pi}{2 \sqrt {2}} \left(\frac {1}{\sqrt {r _ {(K + 1) / 2} ^ {(K)}}} + \frac {K - 1}{2} \left(\frac {1}{\sqrt {r _ {\lfloor K / 2 \rfloor} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K - \lfloor K / 2 \rfloor + 1} ^ {(K)}}}\right)\right).
520
+ $$
521
+
522
+ Using the definition of the Chebyshev root, we have
523
+
524
+ $$
525
+ r _ {\lfloor K / 2 \rfloor} ^ {(K)} = \frac {L + m}{2} - \frac {L - m}{2} \cos \left(\frac {\left(\lfloor \frac {K}{2} \rfloor - \frac {1}{2}\right) \pi}{K}\right) \approx \frac {L + m}{2},
526
+ $$
527
+
528
+ where the approximation is because $\frac{(\lfloor\frac{K}{2}\rfloor - \frac{1}{2})\pi}{K} \approx \frac{\pi}{2}$ when $K$ is large, and hence $\cos \left(\frac{(\lfloor\frac{K}{2}\rfloor - \frac{1}{2})\pi}{K}\right) \approx 0$ . Similarly, we can approximate
529
+
530
+ $$
531
+ r _ {K - \lfloor K / 2 \rfloor + 1} ^ {(K)} = \frac {L + m}{2} - \frac {L - m}{2} \cos \left(\frac {\left(K - \lfloor K / 2 \rfloor + 1 - \frac {1}{2}\right) \pi}{K}\right) \approx \frac {L + m}{2}
532
+ $$
533
+
534
+ as $\frac{(K - \lfloor K / 2\rfloor + 1 - \frac{1}{2})\pi}{K}\approx \frac{\pi}{2}$ when $K$ is large. Also, we can approximate $r_{(K + 1) / 2}^{(K)}\approx \frac{L + m}{2}$ when $K$ is odd and large for the same reason.
535
+
536
+ Combining the above, the total integration time of HMC with the Chebyshev scheme can be approximated as
537
+
538
+ number of iterations $\times$ average integration time
539
+
540
+ $$
541
+ = \sqrt {\kappa} \log \left(\frac {1}{\epsilon}\right) \times \frac {1}{K} \sum_ {k = 1} ^ {K} \eta_ {k} ^ {(K)} \approx \sqrt {\kappa} \log \left(\frac {1}{\epsilon}\right) \times \frac {\pi}{2} \frac {1}{\sqrt {L + m}}.
542
+ $$
543
+
544
+ When $\kappa \coloneqq \frac{L}{m}$ is large, the total integration time becomes
545
+
546
+ $$
547
+ \sqrt {\kappa} \log \left(\frac {1}{\epsilon}\right) \times \frac {\pi}{2} \frac {1}{\sqrt {L + m}} = \Theta \left(\frac {1}{\sqrt {m}} \log \left(\frac {1}{\epsilon}\right)\right). \tag {22}
548
+ $$
549
+
550
+ Now let us switch to analyzing HMC with the best constant integration time $\eta = \Theta\left(\frac{1}{\sqrt{L}}\right)$ (see e.g., (5), Vishnoi (2021)), which has the non-accelerated rate. Specifically, it needs $K = O\left(\kappa \log \left(\frac{1}{\epsilon}\right)\right)$ iterations to converge to the target distribution. Hence, the total integration time of HMC with the best constant integration time is
551
+
552
+ number of iterations $\times$ average integration time $= \kappa \log \left(\frac{1}{\epsilon}\right) \times \Theta \left(\frac{1}{\sqrt{L}}\right) = \Theta \left(\frac{\sqrt{L}}{m} \log \left(\frac{1}{\epsilon}\right)\right)$ . (23)
553
+
554
+ By way of comparison ((22) vs. (23)), we see that the total integration time of HMC with the proposed scheme of Chebyshev integration time reduces by a factor $\sqrt{\kappa}$ , compared with HMC with the best constant integration time.
555
+
556
+ The remaining thing to show is the inequality
557
+
558
+ $$
559
+ \frac {1}{\sqrt {r _ {k} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - k} ^ {(K)}}} \leq \frac {1}{\sqrt {r _ {\lfloor K / 2 \rfloor} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - \lfloor K / 2 \rfloor} ^ {(K)}}}, \tag {24}
560
+ $$
561
+
562
+ for any $k \in \{1,2,\dots ,\left\lfloor \frac{K}{2}\right\rfloor \}$.
563
+
564
+ We have
565
+
566
+ $$
567
+ \begin{array}{l} \frac {1}{\sqrt {r _ {k} ^ {(K)}}} + \frac {1}{\sqrt {r _ {K + 1 - k} ^ {(K)}}} \\ = \sqrt {2} \times \left(\frac {1}{\sqrt {L + m - (L - m) \cos \left(\frac {(k - \frac {1}{2}) \pi}{K}\right)}} + \frac {1}{\sqrt {L + m - (L - m) \cos \left(\frac {(K - k + \frac {1}{2}) \pi}{K}\right)}}\right) \\ = \sqrt {2} \times \left(\frac {1}{\sqrt {L + m - (L - m) \cos \left(\frac {\left(k - \frac {1}{2}\right) \pi}{K}\right)}} + \frac {1}{\sqrt {L + m + (L - m) \cos \left(\frac {\left(k - \frac {1}{2}\right) \pi}{K}\right)}}\right). \tag {25} \\ \end{array}
568
+ $$
569
+
570
+ Now let us define $H(k) \coloneqq \left( \frac{1}{\sqrt{L + m - (L - m)\cos\left(\frac{(k - \frac{1}{2})\pi}{K}\right)}} + \frac{1}{\sqrt{L + m + (L - m)\cos\left(\frac{(k - \frac{1}{2})\pi}{K}\right)}} \right)$ and
571
+
572
+ treat $k$ as a continuous variable.
573
+
574
+ The derivative of $H(k)$ is
575
+
576
+ $$
577
+ \begin{array}{l} H ^ {\prime} (k) = \frac {\pi}{2 K} (L - m) \sin \left(\frac {\left(k - \frac {1}{2}\right) \pi}{K}\right) \times \\ \quad \left(\frac {1}{\left(L + m - (L - m) \cos \left(\frac {\left(k - \frac {1}{2}\right) \pi}{K}\right)\right) ^ {3 / 2}} - \frac {1}{\left(L + m + (L - m) \cos \left(\frac {\left(k - \frac {1}{2}\right) \pi}{K}\right)\right) ^ {3 / 2}}\right) > 0. \tag {26} \end{array}
578
+ $$
579
+
580
+ That is, $H^{\prime}(k) > 0$, so $H(k)$ is an increasing function of $k$ when $1 \leq k \leq \left\lfloor \frac{K}{2} \right\rfloor$, which implies that the inequality (24) holds. Now we have completed the analysis.
581
+
582
+ # E EXPERIMENTS
583
+
584
+ # E.1 BAYESIAN LOGISTIC REGRESSION
585
+
586
+ Table 4: Bayesian logistic regression
587
+
588
+ <table><tr><td colspan="7">HEART dataset (m=2.59, L=92.43)</td></tr><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. Prob</td></tr><tr><td>0.001</td><td>Cheby.</td><td>1693.71 ± 63.53</td><td>520.43 ± 62.24</td><td>18.54 ± 2.88</td><td>5.69 ± 1.12</td><td>1.00 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>312.18 ± 12.65</td><td>80.97 ± 15.97</td><td>6.57 ± 0.42</td><td>1.69 ± 0.28</td><td>1.00 ± 0.00</td></tr><tr><td>0.005</td><td>Cheby.</td><td>1664.87 ± 43.72</td><td>481.76 ± 49.00</td><td>82.90 ± 16.51</td><td>24.08 ± 5.72</td><td>0.99 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>329.48 ± 13.15</td><td>75.78 ± 17.30</td><td>31.87 ± 2.73</td><td>7.40 ± 2.06</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Cheby.</td><td>1648.25 ± 47.50</td><td>508.69 ± 49.81</td><td>157.09 ± 26.70</td><td>48.45 ± 9.64</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>307.52 ± 8.77</td><td>82.85 ± 13.88</td><td>53.89 ± 6.37</td><td>14.62 ± 3.28</td><td>0.99 ± 0.00</td></tr><tr><td>0.05</td><td>Cheby.</td><td>1424.21 ± 54.03</td><td>439.88 ± 56.25</td><td>458.56 ± 51.33</td><td>140.51 ± 16.58</td><td>0.98 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>242.44 ± 14.61</td><td>56.42 ± 17.68</td><td>103.36 ± 12.64</td><td>23.90 ± 7.40</td><td>0.98 ± 0.00</td></tr><tr><td colspan="7">BREAST CANCER dataset (m=1.81, L=69.28)</td></tr><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. 
Prob</td></tr><tr><td>0.001</td><td>Cheby.</td><td>1037.98 ± 34.46</td><td>575.72 ± 41.14</td><td>9.40 ± 0.31</td><td>5.21 ± 0.31</td><td>1.00 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>174.73 ± 13.91</td><td>78.24 ± 23.28</td><td>2.59 ± 0.29</td><td>2.59 ± 0.29</td><td>1.00 ± 0.00</td></tr><tr><td>0.005</td><td>Cheby.</td><td>1010.49 ± 24.15</td><td>571.03 ± 36.64</td><td>43.09 ± 1.14</td><td>24.35 ± 1.70</td><td>0.99 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>173.17 ± 11.40</td><td>79.76 ± 13.49</td><td>11.88 ± 1.39</td><td>11.88 ± 1.39</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Cheby.</td><td>1038.10 ± 31.48</td><td>565.54 ± 50.51</td><td>82.82 ± 3.51</td><td>45.14 ± 4.44</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>162.64 ± 9.43</td><td>58.79 ± 16.02</td><td>18.92 ± 2.59</td><td>18.92 ± 2.59</td><td>0.99 ± 0.00</td></tr><tr><td>0.05</td><td>Cheby.</td><td>886.24 ± 38.92</td><td>499.54 ± 43.99</td><td>240.08 ± 12.55</td><td>135.28 ± 12.04</td><td>0.98 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>99.48 ± 10.10</td><td>44.70 ± 13.23</td><td>33.25 ± 6.50</td><td>33.25 ± 6.50</td><td>0.98 ± 0.00</td></tr><tr><td colspan="7">DIABETES dataset (m=4.96, L=270.20)</td></tr><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. 
Prob</td></tr><tr><td>0.001</td><td>Cheby.</td><td>726.08 ± 33.92</td><td>424.59 ± 58.77</td><td>11.64 ± 0.85</td><td>6.83 ± 1.16</td><td>0.99 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>100.50 ± 9.32</td><td>41.84 ± 19.33</td><td>3.6 ± 0.31</td><td>1.50 ± 0.68</td><td>0.99 ± 0.00</td></tr><tr><td>0.005</td><td>Cheby.</td><td>731.46 ± 33.04</td><td>395.82 ± 47.98</td><td>54.92 ± 5.26</td><td>29.61 ± 3.75</td><td>0.99 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>100.16 ± 11.83</td><td>44.62 ± 20.81</td><td>14.71 ± 2.52</td><td>6.67 ± 3.37</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Cheby.</td><td>687.74 ± 29.31</td><td>399.44 ± 45.01</td><td>93.10 ± 6.78</td><td>53.90 ± 5.38</td><td>0.98 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>83.04 ± 9.36</td><td>36.39 ± 12.43</td><td>20.87 ± 3.31</td><td>9.09 ± 3.25</td><td>0.98 ± 0.00</td></tr><tr><td>0.05</td><td>Cheby.</td><td>546.80 ± 37.40</td><td>330.09 ± 34.31</td><td>206.07 ± 17.76</td><td>125.07 ± 18.87</td><td>0.96 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>57.11 ± 9.52</td><td>23.44 ± 9.57</td><td>27.23 ± 5.18</td><td>11.02 ± 4.34</td><td>0.96 ± 0.00</td></tr></table>
589
+
590
+ # E.2 SAMPLING FROM A HARD DISTRIBUTION
591
+
592
+ Table 5: Sampling from a distribution $\pi \left( x\right) \propto \exp \left( {-{f}_{h}\left( x\right) }\right)$ whose potential ${f}_{h}\left( \cdot \right)$ is defined in (15).
593
+
594
+ <table><tr><td>Step Size</td><td>Method</td><td>Mean ESS</td><td>Min ESS</td><td>Mean ESS/Sec.</td><td>Min. ESS/Sec.</td><td>Acc. Prob</td></tr><tr><td colspan="7">sampling from π(x) ∝ exp(-f0.001(x))</td></tr><tr><td>0.001</td><td>Cheby.</td><td>6222.21 ± 88.90</td><td>453.03 ± 30.35</td><td>114.74 ± 7.59</td><td>8.36 ± 0.83</td><td>1.00 ± 0.00</td></tr><tr><td>0.001</td><td>Const.</td><td>2098.18 ± 46.56</td><td>63.53 ± 15.00</td><td>82.31 ± 5.29</td><td>2.50 ± 0.63</td><td>1.00 ± 0.00</td></tr><tr><td colspan="7">sampling from π(x) ∝ exp(-f0.005(x))</td></tr><tr><td>0.005</td><td>Cheby.</td><td>6271.43 ± 117.71</td><td>429.42 ± 34.52</td><td>545.76 ± 26.10</td><td>37.28 ± 2.29</td><td>0.99 ± 0.00</td></tr><tr><td>0.005</td><td>Const.</td><td>2125.36 ± 21.87</td><td>67.42 ± 16.51</td><td>361.14 ± 5.65</td><td>11.44 ± 2.76</td><td>0.99 ± 0.00</td></tr><tr><td colspan="7">sampling from π(x) ∝ exp(-f0.01(x))</td></tr><tr><td>0.01</td><td>Cheby.</td><td>6523.21 ± 95.65</td><td>459.48 ± 38.83</td><td>1070.77 ± 68.78</td><td>75.61 ± 9.79</td><td>0.99 ± 0.00</td></tr><tr><td>0.01</td><td>Const.</td><td>2125.04 ± 31.83</td><td>69.66 ± 20.75</td><td>528.35 ± 80.17</td><td>17.19 ± 6.34</td><td>0.99 ± 0.00</td></tr><tr><td colspan="7">sampling from π(x) ∝ exp(-f0.05(x))</td></tr><tr><td>0.05</td><td>Cheby.</td><td>6457.21 ± 110.05</td><td>375.97 ± 30.64</td><td>3319.51 ± 134.92</td><td>193.06 ± 14.49</td><td>0.97 ± 0.00</td></tr><tr><td>0.05</td><td>Const.</td><td>2796.41 ± 56.89</td><td>62.33 ± 13.26</td><td>1893.99 ± 57.23</td><td>42.22 ± 9.05</td><td>0.97 ± 0.00</td></tr></table>
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e0f60f2b2661894e5680f1c9886c1d836acaf073ba884c9cea25c4510cea05c
3
+ size 985712
acceleratinghamiltonianmontecarloviachebyshevintegrationtime/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d736a7f253b82f25bb0744d110b88df9b4b335acf2a5217cbd0276756623f2eb
3
+ size 790280
accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e950a594a4091d5c75428269fcc112defa3cbfff62f9991bacbba7af178432dc
3
+ size 178780
accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b3e37c4c4ee3365cc35c0eeb0433d57a52e31a3c5d156fb6e581e18bd3d74ba
3
+ size 226266
accuratebayesianmetalearningbyaccuratetaskposteriorinference/bc44ccfe-a7ec-4f63-9ab2-c5d75c07ac9d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68a30f665e5698b3cb0a56b5ddf09f9201b33593540728a6fa3b2c96107bcaee
3
+ size 8920018
accuratebayesianmetalearningbyaccuratetaskposteriorinference/full.md ADDED
The diff for this file is too large to render. See raw diff
 
accuratebayesianmetalearningbyaccuratetaskposteriorinference/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:09953ec8004c7ceefa83829f4b0eafd9b6b57f5bf83c2b51c2c71ac5c522159d
3
+ size 1043744
accuratebayesianmetalearningbyaccuratetaskposteriorinference/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8e73058f988cafd4d87fdfc65ebf90d35913da6e58e0ef3839afb6a843aecde
3
+ size 1089574
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c387ecf61de5d960cd32da1ae9732bbab7ec992222e4f3de061e64dc75481bc6
3
+ size 75462
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:90b79d196d07413ec9bc5eebaf9d7c6841710f1dad0005ec853d890d06246987
3
+ size 89768
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/177823cd-63fa-4ca5-884d-21d34829807f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7de14d9720b38d6b6af961d0d98c7ae6fc001b23103f083598c459d3cdeb8a1f
3
+ size 501562
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/full.md ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACCURATE NEURAL TRAINING WITH 4-BIT MATRIX MULTIPLICATIONS AT STANDARD FORMATS
2
+
3
+ Brian Chmiel †° Ron Banner † Elad Hoffer † Hilla Ben Yaacov † Daniel Soudry °
4
+
5
+ † Habana Labs – An Intel company, Caesarea, Israel,
6
+
7
+ $^{\circ}$ Department of Electrical Engineering - Technion, Haifa, Israel
8
+
9
+ {bchmiel, rbanner, ehoffer, hbyaacov}@habana.ai
10
+
11
+ {daniel.soudry}@gmail.com
12
+
13
+ # ABSTRACT
14
+
15
+ Quantization of the weights and activations is one of the main methods to reduce the computational footprint of Deep Neural Networks (DNNs) training. Current methods enable 4-bit quantization of the forward phase. However, this constitutes only a third of the training process. Reducing the computational footprint of the entire training process requires the quantization of the neural gradients, i.e., the loss gradients with respect to the outputs of intermediate neural layers.
16
+
17
+ Previous works separately showed that accurate 4-bit quantization of the neural gradients needs to (1) be unbiased and (2) have a log scale. However, no previous work aimed to combine both ideas, as we do in this work. Specifically, we examine the importance of having unbiased quantization in quantized neural network training, where to maintain it, and how to combine it with logarithmic quantization. Based on this, we suggest a logarithmic unbiased quantization (LUQ) method to quantize both the forward and backward phases to 4-bit, achieving state-of-the-art results in 4-bit training without the overhead. For example, in ResNet50 on ImageNet, we achieved a degradation of $1.1\%$ . We further improve this to a degradation of only $0.32\%$ after three epochs of high precision fine-tuning, combined with a variance reduction method—where both these methods add overhead comparable to previously suggested methods. A reference implementation is supplied in the supplementary material.
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Deep neural networks (DNNs) training consists of three main general-matrix-multiply (GEMM) phases: the forward phase, backward phase, and update phase. Quantization has become one of the main methods to compress DNNs and reduce the GEMM computational resources. Previous works showed that the weights and activations in the forward pass can be quantized to 4 bits while preserving model accuracy (Banner et al., 2019; Nahshan et al., 2019; Bhalgat et al., 2020; Choi et al., 2018b). Despite these advances, they only apply to a third of the training process, while the backward phase and update phase are still computed with higher precision.
22
+
23
+ Recently, Sun et al. (2020) was able, for the first time, to train a DNN while reducing the numerical precision of most of its parts to 4 bits with some degradation (e.g., $2.49\%$ error in ResNet50). To do so, Sun et al. (2020) suggested a non-standard radix-4 floating-point format, combined with double quantization of the neural gradients (called two-phase rounding). This was an impressive step forward in the ability to quantize all GEMMs in training. However, since a radix-4 format is not aligned with conventional radix-2, any numerical conversion between the two requires an explicit multiplication to modify both the exponent and mantissa. Thus, their non-standard quantization requires specific hardware support (Kupriianova et al., 2013) that can significantly reduce the benefit of quantization to low bits (Appendix A.6), and make it less practical.
24
+
25
+ The main challenge in reducing the numerical precision of the entire training process is quantizing the neural gradients, i.e. the backpropagated error. Previous works showed separately that, to achieve accurate low precision representation of the neural gradients, it is important to use: (1) Logarithmic
26
+
27
+ quantization and (2) Unbiased quantization. Specifically, Chmiel et al. (2021) showed the neural gradients have a heavy tailed near-lognormal distribution and found an analytical expression for the optimal floating point format. At low precision levels, the optimal format is logarithmically quantized. For example, for FP4 the optimal format is [sign,exponent,mantissa] = [1,3,0], i.e. without mantissa bits. In contrast, weights and activations are well approximated with Normal or Laplacian distributions (Banner et al., 2019; Choi et al., 2018a), and therefore are better approximated using uniform quantization (e.g., INT4). However, Chmiel et al. (2021) did not use unbiased quantization (nor did any of the previous works that use logarithmic quantization of the neural gradients (Li et al., 2020; Miyashita et al., 2016; Ortiz et al., 2018)). Therefore, they were unable to successfully quantize in this FP4 format (their narrowest format was FP5).
28
+
29
+ Chen et al. (2020a) showed that unbiased quantization of the neural gradients is essential to get unbiasedness in the weight gradients, which is required in SGD analysis convergence (Bottou et al., 2018). However, they focused on quantization using integer formats, as did other works that pointed out on the importance of being unbiased (Banner et al., 2018; Zhong et al., 2022). Naive quantization of the neural gradients using the optimal FP4 format (logarithmic) results in biased estimates of the FP32 weight gradients—and this leads to severe degradation in the test accuracy. For example, a major issue is that under aggressive (naive) quantization many neural gradients with magnitudes below the representable range are zeroed, resulting in biased estimates of the FP32 gradients and reduced model accuracy.
30
+
31
+ Using either a logarithmic scale or unbiased rounding alone catastrophically fails at 4bit quantization of the neural gradients (e.g., see Fig. 2 below). Therefore, it is critical to combine them, as we do in this paper in Section 4. To do this, we stochastically quantize gradients below the representable range to either zero or the smallest representable magnitude $\alpha$ to provide unbiased estimates within that "underflow" range. Additionally, in order to represent the maximum magnitude without bias, we dynamically adjust $\alpha$ so that the maximum can always be represented with an exponentiated scaling starting at $\alpha$ . Finally, to completely eliminate bias, we devise an efficient way to use stochastic rounding on a logarithmic scale, on the values between $\alpha$ and the maximum. Together, this gradient quantization method is called Logarithmic Unbiased Quantization (LUQ), and for 4-bit quantization it uses a numerical format with one sign bit, three exponent bits, and zero mantissa bits, along with stochastic mapping (to zero or $\alpha$ ) of gradients whose values are below $\alpha$ and stochastic rounding within the representable range.
32
+
33
+ Main contribution LUQ, for the first time, combines logarithmic quantization with unbiased quantization for the neural gradients and does this efficiently using a standard format. By additionally quantizing the forward phase to INT4, we enable, for the first time, an efficient scheme for "full 4-bit training", i.e. the weights, activations and neural gradients are quantized to 4-bit in standard formats (see Appendix A.1) so all GEMMs can be done in 4-bit, and also bandwidth can be reduced. As we show, this method requires little to no overhead while achieving state-of-the-art accuracy results: for example, in ResNet50 we get $1.1\%$ error degradation with standard formats; in comparison, the previous method (Sun et al., 2020) had $2.49\%$ error degradation but required non-standard formats, as well as additional modifications which have additional overhead.
34
+
35
+ Moreover, in Section 5 we suggest two optional simple methods to further reduce the degradation, with some overhead: the first method reduces the quantization variance of the neural gradients using re-sampling, while the second is fine-tuning in high precision. Combining LUQ with these two proposed methods we achieve, for the first time, only $0.32\%$ error in the 4-bit training of ResNet50. The overhead of our additional methods is no more than similar modifications previously suggested in Sun et al. (2020). Lastly, in Section 7 we discuss how to reduce remaining overheads such as data movement, scaling operations, and GEMM-related operations.
36
+
37
+ # 2 RELATED WORKS
38
+
39
+ Neural networks Quantization has been extensively investigated in the last few years. Most of the quantization research has focused on reducing the numerical precision of the weights and activations for inference (e.g., Courbariaux et al. (2016); Rastegari et al. (2016); Banner et al. (2019); Nahshan et al. (2019); Choi et al. (2018b); Bhalgat et al. (2020); Choi et al. (2018a); Liang et al. (2021)). In this case, for standard ImageNet models, the best performing methods can achieve quantization to 4 bits with small or no degradation Choi et al. (2018a); Sakr et al. (2022). These methods can
40
+
41
+ be used to reduce the computational resources in approximately a third of the training (Eq. (1)). However, without quantizing the neural gradients, we cannot reduce the computational resources in the remaining two thirds of the training process (Eq. (2) and Eq. (3)). An orthogonal approach is low precision for the gradients of the weights in distributed training (Alistarh et al., 2016; Bernstein et al., 2018) in order to reduce the bandwidth and not the training computational resources.
42
+
43
+ Sakr & Shanbhag (2019) suggest a systematic approach to design a full training using fixed point quantization which includes mixed-precision quantization. Banner et al. (2018) first showed that it is possible to use INT8 quantization for the weights, activations, and neural gradients, thus reducing the computational footprint of most parts of the training process. Concurrently, Wang et al. (2018) was the first work to achieve full training in FP8 format. Additionally, they suggested a method to reduce the accumulator precision from 32bit to 16 bits, by using chunk-based accumulation and floating point stochastic rounding. Later, Wiedemann et al. (2020) showed full training in INT8 with improved convergence, by applying a stochastic quantization scheme to the neural gradients called non-subtractive-dithering (NSD). Also, Sun et al. (2019) presented a novel hybrid format for full training in FP8, while the weights and activations are quantized to [1,4,3] format, the neural gradients are quantized to [1,5,2] format to catch a wider dynamic range. Fournarakis & Nagel (2021) suggested a method to reduce the data traffic during the calculation of the quantization range, using the moving average of the tensor's statistics.
44
+
45
+ While it appears that it is possible to quantize to 8-bits all computational elements in the training process, 4-bits quantization of the neural gradients is still challenging. Chmiel et al. (2021) suggested that this difficulty stems from the heavy-tailed distribution of the neural gradients, which can be approximated with a lognormal distribution. This distribution is more challenging to quantize in comparison to the normal distribution which is usually used to approximate the weights or activations (Banner et al., 2019). Different works (Li et al., 2020; Miyashita et al., 2016) tried to use logarithmic quantization for the neural gradients, however, they failed to quantize them unbiasedly.
46
+
47
+ Sun et al. (2020) was the first work that presented a method to reduce the numerical precision to 4-bits for the vast majority of the computations needed during DNNs training. They use known methods to quantize the forward phase to INT4 and suggested quantizing the neural gradients twice with a non-standard radix-4 FP4 format. The use of the radix-4, instead of the commonly used radix-2 format, allows for covering a wider dynamic range. The main problem with their method is the specific hardware support for their suggested radix-4 datatype, which may limit the practicality of implementing their suggested data type.
48
+
49
+ Chen et al. (2020b) suggested reducing the variance in neural gradients quantization by dividing them into several blocks and quantizing each to INT4 separately. They require expensive sorting. Additionally, their per-sample quantization do not allow the use of an efficient GEMM operation.
50
+
51
+ # 3 BACKGROUND: QUANTIZATION FORMS AND ROUNDED SCHEMES
52
+
53
+ Which quantization schemes should we use in 4bit training? Previous works (Choi et al., 2018a; Banner et al., 2019) showed that weights and activations can be quantized to INT4 with little to no accuracy degradation. In contrast, for the neural gradients, a recent work (Chmiel et al., 2021) showed analytically that the optimal format is logarithmic ([1,3,0]). Combining all these schemes, we focus on full 4-bit training using standard formats, with the following three 4-bit quantized GEMMs:
54
+
55
+ $$
56
+ [ \text {F o r w a r d} ] \quad z _ {l} = Q _ {\mathrm {I N T}} \left(W _ {l}\right) Q _ {\mathrm {I N T}} \left(a _ {l - 1}\right); \quad a _ {l} = f _ {l} \left(z _ {l}\right) \tag {1}
57
+ $$
58
+
59
+ $$
60
+ [ \text {B a c k w a r d} ] \quad g _ {l - 1} = Q _ {\mathrm {I N T}} \left(W _ {l} ^ {T}\right) Q _ {\mathrm {F P}} (\delta_ {l}); \quad \delta_ {l} = f _ {l} ^ {\prime} \left(z _ {l}\right) \odot g _ {l} \tag {2}
61
+ $$
62
+
63
+ $$
64
+ [ \text {U p d a t e} ] \quad \frac {\partial C}{\partial W _ {l}} = Q _ {\mathrm {F P}} \left(\delta_ {l}\right) Q _ {\mathrm {I N T}} \left(a _ {l - 1} ^ {T}\right), \tag {3}
65
+ $$
66
+
67
+ where $C$ is the loss function, $\odot$ is a component-wise product and, in each layer $l$ , $f_{l}$ is the activation function, the weights $(W_{l})$ and activations $(a_{l})$ are quantized with INT4 $(Q_{\mathrm{INT}})$ while the neural gradients $\delta_{l} \triangleq \frac{\partial C}{\partial z_{l}}$ are quantized with logarithmic FP4 $(Q_{\mathrm{FP}})$ , $z_{l}$ are the pre-activations, and $g_{l} \triangleq \frac{\partial C}{\partial a_{l}}$ .
68
+
69
+ Next, we aim to find which rounding scheme should we use in each quantizer. Thus, we study the effects of unbiased rounding during the three phases of training (Eqs. 1, 2, and 3). We show that rounding-to-nearest (RDN) should be applied for the weights and activations $(Q_{\mathrm{INT}})$ , while the unbiased method of stochastic rounding (SR) is more suitable for the neural gradients $(Q_{\mathrm{FP}})$ .
70
+
71
+ # 3.1 MEAN SQUARE ERROR COMPARISON
72
+
73
+ In this section, we show that, although stochastic rounding (SR) is unbiased, it generically has a worse mean-square error (MSE) compared to round-to-nearest (RDN). Given that we want to quantize $x$ in a bin with a lower limit $l(x)$ and an upper limit $u(x)$ , stochastic rounding can be stated as follows:
74
+
75
+ $$
76
+ \operatorname {S R} (x) = \left\{ \begin{array}{l l} l (x), & w. p. \quad p (x) = 1 - \frac {x - l (x)}{u (x) - l (x)} \\ u (x), & w. p. \quad 1 - p (x) = \frac {x - l (x)}{u (x) - l (x)} \end{array} \right.. \tag {4}
77
+ $$
78
+
79
+ The expected rounding value is given by
80
+
81
+ $$
82
+ E [ \operatorname {S R} (x) ] = l (x) \cdot p (x) + u (x) \cdot (1 - p (x)) = x, \tag {5}
83
+ $$
84
+
85
+ where here and below the expectation is over the randomness of SR (i.e., $x$ is a deterministic constant). In Table 1 we present the bias, variance, and MSE of RDN and SR. Full derivatives appear in Appendix A.2.
86
+
87
+ Table 1: Comparison of the bias, variance, and MSE of two different rounding schemes: round-to-nearest (RDN) and stochastic rounding (SR)
88
+
89
+ <table><tr><td>Rounding</td><td>Bias</td><td>Variance</td><td>MSE</td></tr><tr><td>RDN</td><td>min (x - l(x), u(x) - x)</td><td>0</td><td>[min (x - l(x), u(x) - x)]2</td></tr><tr><td>SR</td><td>0</td><td>(x - l(x)) · (u(x) - x)</td><td>(x - l(x)) · (u(x) - x)</td></tr></table>
90
+
91
+ From Table 1, since $\min (a,b)^2\leq a\cdot b$ for every $a,b$ , we have that
92
+
93
+ $$
94
+ \operatorname {M S E} [ S R (x) ] \geq \operatorname {M S E} [ R D N (x) ], \quad \forall x. \tag {6}
95
+ $$
96
+
97
+ In Fig. 1a we plot the mean-square-error for $x \in [0,1]$ , $l(x) = 0$ , and $u(x) = 1$ . However, while round-to-nearest has a lower MSE than SR, the former is a biased estimator.
98
+
99
+ # 3.2 WHEN IS IT IMPORTANT TO USE UNBIASED QUANTIZATION?
100
+
101
+ To prove convergence, textbook analyses of SGD typically assume the expectation of the (mini-batch) weight gradients is sufficiently close to the true (full-batch) gradient (e.g., assumption 4.3 in (Bottou et al., 2018)). This assumption is satisfied when the weight gradients are unbiased. Next, we explain (as pointed out by previous works, such as Chen et al. (2020a)) that the weight gradients are unbiased when the neural gradients are quantized stochastically without bias.
102
+
103
+ Recall $W_{l}$ and $f_{l}$ are, respectively, the weights and activation function at layer $l$ and $C$ is the cost function. Given an input-output pair $(x,y)$ , the loss is:
104
+
105
+ $$
106
+ C \left(y, f _ {L} \left(W _ {L} f _ {L - 1} \left(W _ {L - 1} \dots f _ {2} \left(W _ {2} f _ {1} \left(W _ {1} x\right)\right) \dots\right)\right)\right). \tag {7}
107
+ $$
108
+
109
+ Backward and Update Phases Recall $z_{l}$ and $a_{l}$ are, respectively, the pre- and post- activations of layer $l$ , and $\delta_l \triangleq \frac{dC}{dz_l}$ . Defining $\delta_l^q \triangleq Q_{\mathrm{FP}}(\delta_l)$ , we show in Appendix A.3 that the gradient of the weights in layer $l$ is $\nabla_{W_l} C = \delta_l a_{l-1}^\top$ and its quantized form is $\nabla_{W_l} C_q = \delta_l^q a_{l-1}^\top$ . Therefore, the update $\nabla_{W_l} C_q$ is an unbiased estimator of $\nabla_{W_l} C$ :
110
+
111
+ $$
112
+ E \left[ \nabla_ {W _ {l}} C _ {q} \right] = E \left[ \delta_ {l} ^ {q} a _ {l - 1} ^ {\top} \right] = E \left[ \delta_ {l} ^ {q} \right] a _ {l - 1} ^ {\top} = \delta_ {l} a _ {l - 1} ^ {\top} = \nabla_ {W _ {l}} C. \tag {8}
113
+ $$
114
+
115
+ Forward phase The forward phase is different from the backward and updates phases in that unbiasedness at the tensor level is not necessarily a guarantee of unbiasedness at the model level since the activation functions and loss functions are not linear. For example, suppose we have two weight layers $W_{1}, W_{2}$ , activation $f$ , input $x$ , and an SR quantizer $Q$ . Then, despite that $Q$ is unbiased (i.e., $EQ(x) = x$ ), we get:
116
+
117
+ $$
118
+ \mathbb {E} \left[ f \left(W _ {2} Q \left(f \left(W _ {1} x\right)\right)\right) \right] \neq \mathbb {E} \left[ f \left(W _ {2} f \left(W _ {1} x\right)\right) \right], \tag {9}
119
+ $$
120
+
121
+ since $f$ is non-linear. This means there is no point to use SR in the forward path since it will increase the MSE (Eq. (6)), but it will not fix the bias issue.
122
+
123
+ # 3.3 CONCLUSIONS: WHEN TO USE EACH ROUNDING SCHEME?
124
+
125
+ Following the above results, the activation and weights quantization in the forward phase ( $Q_{\mathrm{INT}}$ in Eq. (1)) should use RDN. This is because SR will increase the MSE (as shown in Eq. (6)), an increase which typically harms the final accuracy $^2$ , but will not help make the loss estimate unbiased, due to the non-linearity of the loss and activation functions (e.g., Eq. (9)). To avoid mismatch (and additional bias), we use RDN in $Q_{\mathrm{INT}}$ also in the other phases of training.
126
+
127
+ As we explained in section 3.2, unbiased neural gradients quantization leads to an unbiased estimate of the weight gradients, which enables proper convergence of SGD (Bottou et al., 2018). Thus, bias in the gradients can hurt the performance and should be avoided, even at the cost of increasing the MSE. Therefore, neural gradient quantization $(Q_{\mathrm{FP}})$ , should be done using a SR rounding scheme, following subsection 3.2. In Figs. 1b and 1c we see that these theoretical observations are consistent with empirical observations favoring RDN for the weights and activations $(Q_{\mathrm{INT}})$ and SR for the neural gradients $(Q_{\mathrm{FP}})$ .
128
+
129
+ ![](images/a478be00252bff7864ebd56c99a72744f0db5aae1bdf0d82d6757f8fe08aec45.jpg)
130
+ (a)
131
+
132
+ ![](images/a98e28ea33d99c56cf1c58aa13deb3e2aa4798c8f74401a4776108106dc5f64d.jpg)
133
+ (b)
134
+ Figure 1: Comparison between stochastic rounding (SR) and round-to-nearest (RDN) quantization. In (a) we present the MSE of a uniform distributed tensor with the two different rounding schemes. Quantization to 4 bits of the activations and weights $(Q_{\mathrm{INT}})$ (b) and neural gradients $(Q_{\mathrm{FP}})$ (c) of ResNet18 - Cifar100 dataset with SR and RDN. While MSE is important in $Q_{\mathrm{INT}}$ for the weights and activations, unbiasedness achieved with SR is crucial for the neural gradients in $Q_{\mathrm{FP}}$ . The neural gradients in (b) and the weights and activations in (c), are in full precision to focus on the effect of the rounding scheme only in one part of the network in each experiment.
135
+
136
+ ![](images/9c11b92c7525fd77775c0eab36876f227eba22b46170cd076918cc8d258795a4.jpg)
137
+ (c)
138
+
139
+ # 4 LUQ: A LOGARITHMIC UNBIASED QUANTIZER
140
+
141
+ Following the conclusions from the previous section, the neural gradients quantizer ( $Q_{\mathrm{FP}}$ in eqs. 2 and 3) should be logarithmic and be completely unbiased. In this section we aim to do this efficiently and create, for the first time, a Logarithmic Unbiased Quantizer (LUQ).
142
+
143
+ Standard radix-2 floating-point defines a dynamic range. In standard FP, all the values below the minimum FP representation are pruned to 0 and all the values above the maximum FP representation are clipped to the maximum. In order to create a fully unbiased quantizer, we need to keep all the following three regions unbiased: below range minimum, inside range, and above range maximum.
144
+
145
+ 1) Below FP minimum: Stochastic underflow Given an underflow threshold $\alpha$ we define a stochastic pruning operator, which prunes a given value $x$ , as
146
+
147
+ $$
148
+ T _ {\alpha} (x) = \left\{ \begin{array}{l l} x & , \text {i f} | x | \geq \alpha \\ \operatorname {s i g n} (x) \cdot \alpha & w. p. \frac {| x |}{\alpha}, \text {i f} | x | < \alpha \\ 0 & w. p. 1 - \frac {| x |}{\alpha}, \text {i f} | x | < \alpha . \end{array} \right. \tag {10}
149
+ $$
150
+
151
+ 2) Above FP maximum: Underflow threshold In order to create an unbiased quantizer, the largest quantization value $2^{2^{b-1}} \alpha$ should avoid clipping any values in a tensor $x$ , otherwise this will create a bias. Therefore, the maximal quantization value is chosen as $\max(|x|)$ , the minimal value which will avoid clipping and bias. Accordingly, the underflow threshold $\alpha$ is (with $b = 3$ for FP4)
152
+
153
+ $$
154
+ \alpha = \frac {\operatorname* {m a x} (| x |)}{2 ^ {2 ^ {b - 1}}}. \tag {11}
155
+ $$
156
+
157
+ ![](images/4ff6436983a64d605068ffa5b574d6e151094258cfd3145ca87e298cbc93a4f1.jpg)
158
+ Figure 2: (Left): ResNet50 top-1 validation accuracy in ImageNet dataset with different quantization schemes for the neural gradients. FP4 refers to standard logarithmic (1-3-0) floating point quantization. SR refers to stochastic rounding, which makes the quantization unbiased inside the FP range. SU refers to stochastic underflow which makes the quantization unbiased below minimum FP. "INT4 unbiased" refers to the combination of INT4 and SR which is fully unbiased. Notice that while biased logarithmic quantization ("FP4"), partially biased logarithmic ("FP4 + SR", "FP4 + SR + SU"), and uniform unbiased ("INT4 unbiased") lead to significant accuracy degradation, the proposed fully unbiased logarithmic quantization ("LUQ") has a minimal degradation. (Right): Illustration of the different logarithmic quantization schemes for FP2 ([1,1,0] format), where $2^0$ and $2^2$ are, respectively, the minimal and maximal FP representations. Two arrows for the same point mean SR - thicker lines represent higher probability. Only LUQ is able to achieve unbiasedness in all floating point ranges. The operation in Eq. (11) can have some overhead, but in Section 7 we present two methods ("Reducing the data movement" and "Reducing the cost of the scaling operation") to decrease this overhead.
159
+
160
+ 3) Inside FP range: Logarithmic SR Given an underflow threshold $\alpha$ , let $Q_{\alpha}(x)$ be a stochastic-rounding FP $b$ -bit quantizer with bins $\{\alpha, 2\alpha, \dots, 2^{2^{b-1}}\alpha\}$ . Assume, without loss of generality, $2^{n-1}\alpha < x < 2^{n}\alpha$ ( $n \in \{1, \dots, 2^{b-1}\}$ ). We will use the following quantizer, which is a special case of SR (Eq. (4)), and is unbiased as a special case of Eq. (5):
161
+
162
+ $$
163
+ Q _ {\alpha} (x) = \left\{ \begin{array}{l l} 2 ^ {n - 1} \alpha & w. p. \frac {2 ^ {n} \alpha - x}{2 ^ {n} \alpha - 2 ^ {n - 1} \alpha} \\ 2 ^ {n} \alpha & w. p. 1 - \frac {2 ^ {n} \alpha - x}{2 ^ {n} \alpha - 2 ^ {n - 1} \alpha} = \frac {x - 2 ^ {n - 1} \alpha}{2 ^ {n - 1} \alpha}. \end{array} \right. \tag {12}
164
+ $$
165
+
166
+ The naive implementation of stochastic rounding can be expensive since it cannot use the standard quantizer. Traditionally, in order to use the standard quantizer the implementation includes simply adding uniform random noise $\epsilon \sim U\left[-\frac{2^{n-1}\alpha}{2}, \frac{2^{n-1}\alpha}{2}\right]$ to $x$ and then using a round-to-nearest operation. The overhead of such stochastic rounding is typically negligible (Appendix A.5.1) in comparison to other operations in neural networks training. Moreover, it is possible to reduce any such overhead with the re-use of the random samples (Appendix A.5.2). In our case, in order to implement a logarithmic round-to-nearest, we need to correct an inherent bias since $\alpha \cdot 2^{\lfloor \log \left(\frac{|x|}{\alpha}\right)\rfloor} \neq \alpha \cdot \lfloor 2^{\log \left(\frac{|x|}{\alpha}\right)}\rfloor$ .
167
+
168
+ For a bin $[2^{n - 1}, 2^n]$ , the midpoint $x_m$ is
169
+
170
+ $$
171
+ x _ {m} = \frac {2 ^ {n} + 2 ^ {n - 1}}{2} = \frac {3}{4} \cdot 2 ^ {n}. \tag {13}
172
+ $$
173
+
174
+ Therefore, we can apply round-to-nearest-power (RDNP) directly on the exponent $x$ of any value $2^{n - 1} < 2^x < 2^n$ as follows:
175
+
176
+ $$
177
+ \mathrm {R D N P} (2 ^ {x}) = 2 ^ {\left\lfloor \log \left(\frac {4}{3} \cdot 2 ^ {x}\right) \right\rfloor} = 2 ^ {\left\lfloor x + \log \left(\frac {4}{3}\right) \right\rfloor} = 2 ^ {\mathrm {R D N} \left(x + \log \left(\frac {4}{3}\right) - \frac {1}{2}\right)} \approx 2 ^ {\mathrm {R D N} (x - 0. 0 8 4)}. \tag {14}
178
+ $$
179
+
180
+ Notice that the use of RDNP avoids the need of converting back to linear space in order to implement SR and avoid additional computational overhead.
181
+
182
+ Logarithmic unbiased quantization (LUQ) LUQ, the quantization method we suggested above, is unbiased since it can be thought of as applying logarithmic stochastic rounding (Eq. (12)) on top of stochastic pruning (Eq. (10))
183
+
184
+ $$
185
+ X _ {q} = Q _ {\alpha} \left(T _ {\alpha} (x)\right). \tag {15}
186
+ $$
187
+
188
+ Since $T_{\alpha}$ and $Q_{\alpha}$ are unbiased, $X_{q}$ is an unbiased estimator for $x$ , from the law of total expectation,
189
+
190
+ $$
191
+ E \left[ X _ {q} \right] = E \left[ Q _ {\alpha} \left(T _ {\alpha} (x)\right) \right] = E \left[ E \left[ Q _ {\alpha} \left(T _ {\alpha} (x)\right) \mid T _ {\alpha} (x) \right] \right] = E \left[ T _ {\alpha} (x) \right] = x, \tag {16}
192
+ $$
193
+
194
+ where the expectation is over the randomness of $T_{\alpha}$ and $Q_{\alpha}$ .
195
+
196
+ In Fig. 2 (Left) we show an ablation study of the effect of the different quantization schemes on ResNet50 in ImageNet: while standard (biased) FP4 diverges, adding stochastic rounding or stochastic underflow (which make the process partially unbiased) enables convergence, but with significant degradation. Combining logarithmic quantization with full unbiasedness in LUQ obtained minimal accuracy degradation. Notice also that unbiasedness alone without logarithmic quantization ("INT4 unbiased") completely diverges. In Fig. 2 (Right) we show an illustration of the different logarithmic quantization schemes, where only LUQ achieves fully unbiased logarithmic FP quantization.
197
+
198
+ # 5 OPTIONAL METHODS
199
+
200
+ Next, we present two optional methods to improve accuracy at some computational cost.
201
+
202
+ # 5.1 SMP: REDUCING THE VARIANCE WHILE KEEPING IT UNBIASED
203
+
204
+ In the previous section, we presented an unbiased method for logarithmic quantization of the neural gradients called LUQ. Following the bias-variance decomposition, if the gradients are now unbiased, then the only remaining issue should be their variance. Therefore, we suggest an optional method to reduce the quantization variance by repeatedly sampling from the stochastic quantizers in LUQ, and averaging the resulting samples of the final weight gradients. The proposed sampling can be implemented serially or in parallel. The serial implementation (re-using the same quantizer) has a power and throughput overhead but does not require additional hardware (area) support, so it should be used if the chip area is the bottleneck. The parallel implementation avoids almost completely the throughput overhead (except the averaging operation), but it requires additional area for the multiple quantizers, so it should be used when the throughput is the bottleneck. For $N$ different samples, the proposed method will reduce the variance by a factor of $\frac{1}{N}$ , without affecting the bias (Gilli et al., 2019). In Appendix Fig. 5 we show the effect of the different number of samples (SMP) on 2-bit quantization of ResNet18 Cifar100 dataset. There, with 16 samples, we achieve accuracy similar to a full-precision network. This demonstrates that the variance is the only remaining issue in neural gradient quantization using LUQ and that the proposed averaging method can erase this variance gap, with some overhead.
205
+
206
+ A different approach to reduce the variance can be to increase the bitwidth in the update phase. In order to keep using standard formats, we should increase the update phase to FP8-INT4, which leads to $3.5 \times$ degradation in compute density in comparison to FP4-INT4 as shown in Sun et al. (2020) (Table s-1). Therefore, using the proposed SMP with two samples (as we shall do) has a significant advantage in compute density $(1.75 \times)$ in comparison to increasing the bitwidth.
207
+
208
+ # 5.2 FNT: FINE-TUNING IN HIGH PRECISION
209
+
210
+ After the 4-bit training is finished, we suggest an optional method to reduce the gap from the full precision model, by running $T$ additional iterations in which we increase all the network parts to higher precision, except the weights which remain in low precision. We noticed that with this scheme we get the best accuracy for the fine-tuned network. At inference time the activations and weights are quantized to lower precision. During the fine-tune phase, the Learning Rate (LR) is increased linearly during $\frac{T}{2}$ iterations and then reduced linearly with the same slope:
211
+
212
+ $$
213
+ \mathrm {L R} _ {t} = \left\{ \begin{array}{l l} \mathrm {L R} _ {T} + \frac {\left(\mathrm {L R} _ {\text {b a s e}} - \mathrm {L R} _ {T}\right)}{T / 2} & , \text {i f} t \leq \frac {T}{2} \\ \mathrm {L R} _ {T} \cdot \frac {(T - t)}{T / 2} & , \text {e l s e} \end{array} , \right. \tag {17}
214
+ $$
215
+
216
+ where $\mathrm{LR}_T$ is the final LR of the 4-bit training and $\mathrm{LR}_{\mathrm{base}}$ is the maximal LR of the fine-tune phase.
217
+
218
+ # 6 EXPERIMENTS
219
+
220
+ In this section, we evaluate the proposed LUQ for 4-bit training on various DNN models. For all models, we use their default architecture, hyper-parameters, and optimizers combined with a
221
+
222
+ custom-modified Pytorch framework that implemented all the low precision schemes. Additional experimental details appear in Appendix A.4.
223
+
224
+ INT4 quantization INT4 quantization methods for the weights and activations (forward pass) were well studied in the past. In this paper, we used SAWB Choi et al. (2018a) to quantize the weights and activations. SAWB determines the quantization scaling factor by first finding the optimal (in terms of MSE) scaling factor on six distribution approximations of the true tensor distribution, and then applying linear regression to find the chosen scaling factor.
225
+
226
+ Training time measurement Notice that, currently, AI accelerators do not support 4-bit formats for training. This means that we can only simulate the quantization process, but are not able to measure training time or memory reduction. This is the common practice in the neural network quantization literature, where the algorithms often appear before the hardware that can support them. For example, though we can find FP8 training publications since 2019 (Sun et al., 2019), only recently did Nvidia announce their first GPU that supports the FP8 format (H100).
227
+
228
+ Main results In Table 2 we show the Top-1 accuracy achieved in 4-bit training using LUQ to quantize the neural gradients to FP4 and combined with a previously suggested method, SAWB (Choi et al., 2018a), to quantize the weights and activations to INT4. We compare our method with Ultra-low (Sun et al., 2020) showing better results in all the models, achieving SOTA in 4-bit training. Moreover, we improve the results by using the proposed SMP (Section 5.1). In Table 3 we show the effect of the proposed fine-tuning, reducing or closing completely the gap from full-precision model. We verified that stochasticity has only a negligible effect on the variance of final performance by running a few different seeds. Additional experiments appear in Appendix A.5.
229
+
230
+ Table 2: Comparison of 4-bit training of the proposed method LUQ with Ultra-low (Sun et al., 2020) in various vision models with ImageNet dataset, Transformer-base in WMT En-De task dataset and BERT fine-tune in SQUAD dataset. SMP refers to doing two samples of the SR quantization of neural gradients in order to reduce the variance (Section 5.1).
231
+
232
+ <table><tr><td>Model</td><td>Baseline</td><td>Ultra-low3(Sun et al., 2020)</td><td>LUQ</td><td>LUQ + SMP</td></tr><tr><td>ResNet-18</td><td>69.7 %</td><td>68.27%</td><td>69.09%</td><td>69.24 %</td></tr><tr><td>ResNet-50</td><td>76.5%</td><td>74.01%</td><td>75.42 %</td><td>75.63 %</td></tr><tr><td>MobileNet-V2</td><td>71.9 %</td><td>68.85 %</td><td>69.55 %</td><td>69.7 %</td></tr><tr><td>ResNext-50</td><td>77.6 %</td><td>N/A</td><td>76.02 %</td><td>76.12 %</td></tr><tr><td>Transformer-base</td><td>27.5 (BLEU)</td><td>25.4</td><td>27.17</td><td>27.25</td></tr><tr><td>BERT fine-tune</td><td>87.03 (F1)</td><td>N/A</td><td>85.75</td><td>85.9</td></tr><tr><td>ViT B</td><td>76.2</td><td>N/A</td><td>73.7 %</td><td>74.1 %</td></tr></table>
233
+
234
+ Table 3: Effect of the proposed FNT method (Section 5.2) using FP16 format with different epochs.
235
+
236
+ <table><tr><td>Model</td><td>Baseline</td><td>LUQ + SMP</td><td>+FNT 1 epoch</td><td>+FNT 2 epochs</td><td>+FNT 3 epochs</td></tr><tr><td>ResNet-18</td><td>69.7 %</td><td>69.24 %</td><td>69.7 %</td><td>-</td><td>-</td></tr><tr><td>ResNet-50</td><td>76.5 %</td><td>75.63%</td><td>75.89 %</td><td>76 %</td><td>76.18 %</td></tr><tr><td>MobileNet-V2</td><td>71.9 %</td><td>69.7 %</td><td>70.1 %</td><td>70.3 %</td><td>70.3 %</td></tr><tr><td>ResNext-50</td><td>77.6 %</td><td>76.12%</td><td>76.25 %</td><td>76.33 %</td><td>76.7 %</td></tr></table>
237
+
238
+ Overhead of SMP and FNT We limit our experiments with the proposed SMP method to only two samples. This is to achieve a similar computational overhead as Ultra-low(Sun et al., 2020), with their suggested two-phase-rounding (TPR) which also generates a duplication for the neural gradient quantization. Additional ablation study of the SMP overhead appears in Appendix A.5. The throughput of a 4-bit training network is approximately $8\mathrm{x}$ in comparison to full precision training (Sun et al., 2020). This means that doing one additional epoch in high precision reduces the throughput by $\sim 8\%$ . In comparison, Ultra-low (Sun et al., 2020) does full-training with all the 1x1 convolutions in 8bit, which reduces the throughput by $\sim 50\%$ in comparison to all 4bit training.
239
+
240
+ Forward-backward ablations In Appendix Table 6 we show the Top-1 accuracy in ResNet50 with different quantization schemes. The forward phase (activations + weights) is quantized to INT4 with SAWB (Choi et al., 2018a) and the backward phase (neural gradients) to FP4 with LUQ. As expected, the network is more sensitive to the quantization of the backward phase.
241
+
242
+ # 7 DISCUSSION
243
+
244
+ Conclusions In this work, we analyze the difference between two rounding schemes: round-to-nearest and stochastic-rounding. We showed that, while the former has lower MSE and works better for the quantization of the forward phase (weights and activations), the latter is an unbiased approximation of the original data and works better for the quantization of the backward phase (specifically, the neural gradients).
245
+
246
+ Based on these conclusions and previous works (Chmiel et al. (2021)) that showed the optimality of logarithmic quantization, we propose the first method that combines logarithmic quantization with unbiasedness: the logarithmic unbiased quantizer (LUQ), which quantizes the neural gradients to the FP4 [1,3,0] format. Combined with a known method for quantizing the weights and activations to INT4, we achieved, without overhead, state-of-the-art results in standard-format 4-bit training for all the models we examined, e.g., $1.1\%$ error in ResNet50 vs. $2.49\%$ for the previously known SOTA (Sun et al. (2020), which used a non-standard format).
247
+
248
+ Moreover, we suggest two more methods to improve the results, with overhead comparable to Sun et al. (2020). The first reduces the quantization variance, without affecting the unbiasedness of LUQ, by averaging several samples of stochastic neural gradients quantization. The second is a simple method for fine-tuning in high precision for one epoch. Combining all these methods, we were able for the first time to achieve $0.32\%$ error in 4-bit training of ResNet50 ImageNet dataset.
249
+
250
+ Reducing the data movement So far, we focused on resolving the 4-bit GEMM operation bottleneck in DNNs training. It reduces not only the computational resources for the GEMM operations, but also reduces the required memory in DNNs training. However, LUQ, similarly to previous quantization methods (Sun et al., 2020; Choi et al., 2018b;a), requires a statistical measurement of the tensor to define the quantization dynamic range. Specifically, LUQ requires a measurement of the maximum of the neural gradient tensor. Such measurement increases the data movement from and to memory, making this data movement a potential bottleneck in some hardware.
251
+
252
+ In order to avoid this bottleneck when it is a major issue, we verified that LUQ can be combined with the In-hindsight Fournarakis & Nagel (2021) statistics estimation method, which uses a pre-computed measurement to quantize the current tensor and in parallel extract the current statistics for the next iteration. The maximum estimate in LUQ, $\hat{m}$ is calculated as:
253
+
254
+ $$
255
+ \hat {m} ^ {t} = (1 - \eta) \cdot \max \left(\left| x ^ {t - 1} \right|\right) + \eta \cdot \hat {m} ^ {t - 1}, \tag {18}
256
+ $$
257
+
258
+ where $\eta$ is the momentum hyperparameter, and $x$ is tensor of neural gradients. In Appendix A.5.5 we show the effectiveness of this statistic estimation, which eliminates this data movement bottleneck, with a negligible change to the accuracy. Though this method can potentially introduce some bias to the quantization process, the bias seems to be negligible (see Appendix Fig. 7).
259
+
260
+ Reducing the cost of the scaling operation In Appendix A.7 we present an optional method to convert the underflow threshold $\alpha$ (Eq. (11)) to a power-of-two, at a small cost to accuracy. This can reduce or eliminate the computational overhead of the multiplication by $\alpha$ in Eq. (12), which can be significant in some cases.
261
+
262
+ Multiplication free backpropagation In this work, we reduce the GEMM bottleneck by combining two different data-types for the forward (INT4) and backward (FP4) passes. Standard GEMM operations with different formats, require casting the operand to a common datatype before the multiplication. The cost of the casting operation can be significant. We notice, that we are dealing with a special case, where one of the operands includes only a mantissa (weights and activations) and the other only an exponent (neural gradients). Our initial analysis (Appendix A.8) shows that this allows, with small hardware changes, reduction of the area of standard GEMM block by $5 \times$ .
263
+
264
+ Accumulation width A different future direction is to reduce the accumulator width, which is usually kept as FP32. As explained in Appendix A.8, the FP32 accumulator is the most expensive block when training in low bits. Now, after allowing training with 4-bit it is reasonable to think that the accumulator width can be reduced.
265
+
266
+ # ACKNOWLEDGEMENT
267
+
268
+ The research of DS was Funded by the European Union (ERC, A-B-C-Deep, 101039436). Views and opinions expressed are however those of the author only and do not necessarily reflect those of the European Union or the European Research Council Executive Agency (ERCEA). Neither the European Union nor the granting authority can be held responsible for them. DS also acknowledges the support of Schmidt Career Advancement Chair in AI.
269
+
270
+ # REFERENCES
271
+
272
+ Graphcore IPU. URL https://docs.graphcore.ai/projects/tensorflow1-user-guide/en/latest/tensorflow/random_and_fp.html.
273
+ Habana gaudi. URL https://docs.habana.ai/en/latest/TPC/TPC_C_Language_Spec/Conversions_and_Type_Casting.html#rounding-modes.
274
+ Tesla dojo technology. URL https://tesla-cdn.thron.com/static/SBY4B9_tesla-dojo-technology_OPNZ0M.pdf?xseo=&response-content-disposition=inline%3Bfilename%3D%22tesla-dojo-technology.pdf%22.
275
+ Dan Alistarh, Demjan Grubic, Jungshian Li, Ryota Tomioka, and M. Vojnovic. Qsgd: Communication-optimal stochastic gradient descent, with applications to training neural networks. 2016.
276
+ R. Banner, Yury Nahshan, and Daniel Soudry. Post training 4-bit quantization of convolutional networks for rapid-deployment. In NeurIPS, 2019.
277
+ Ron Banner, Itay Hubara, Elad Hoffer, and Daniel Soudry. Scalable methods for 8-bit training of neural networks. In NeurIPS, 2018.
278
+ Jeremy Bernstein, Yu-Xiang Wang, Kamyar Azizzadenesheli, and Anima Anandkumar. signsgd: compressed optimisation for non-convex problems. ArXiv, abs/1802.04434, 2018.
279
+ Yash Bhalgat, Jinwon Lee, Markus Nagel, Tijmen Blankevoort, and Nojun Kwak. Lsq+: Improving low-bit quantization through learnable offsets and better initialization. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 2978-2985, 2020.
280
+ Léon Bottou, Frank E Curtis, and Jorge Nocedal. Optimization methods for large-scale machine learning. Siam Review, 60(2):223-311, 2018.
281
+ Jianfei Chen, Yu Gai, Zhewei Yao, Michael W Mahoney, and Joseph E Gonzalez. A statistical framework for low-bitwidth training of deep neural networks. arXiv preprint arXiv:2010.14298, 2020a.
282
+ Jianfei Chen, Yujie Gai, Z. Yao, M. Mahoney, and Joseph Gonzalez. A statistical framework for low-bitwidth training of deep neural networks. In NeurIPS, 2020b.
283
+ Brian Chmiel, Liad Ben-Uri, Moran Shkolnik, E. Hoffer, Ron Banner, and Daniel Soudry. Neural gradients are lognormally distributed: understanding sparse and quantized training. In ICLR, 2021.
284
+ Jungwook Choi, P. Chuang, Zhuo Wang, Swagath Venkataramani, V. Srinivasan, and K. Gopalakrishnan. Bridging the accuracy gap for 2-bit quantized neural networks (qnn). *ArXiv*, abs/1807.06964, 2018a.
285
+ Jungwook Choi, Zhuo Wang, Swagath Venkataramani, P. Chuang, V. Srinivasan, and K. Gopalakrishnan. Pact: Parameterized clipping activation for quantized neural networks. ArXiv, abs/1805.06085, 2018b.
286
+ Matthieu Courbariaux, Itay Hubara, Daniel Soudry, Ran El-Yaniv, and Yoshua Bengio. Binarized Neural Networks: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1. arXiv e-prints, art. arXiv:1602.02830, February 2016.
287
+
288
+ Marios Fournarakis and Markus Nagel. In-hindsight quantization range estimation for quantized training. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 3057-3064, 2021.
289
+ Manfred Gilli, Dietmar Maringer, and Enrico Schumann. Chapter 6 - generating random numbers. In Manfred Gilli, Dietmar Maringer, and Enrico Schumann (eds.), Numerical Methods and Optimization in Finance (Second Edition), pp. 103-132. Academic Press, second edition edition, 2019. ISBN 978-0-12-815065-8. doi: https://doi.org/10.1016/B978-0-12-815065-8.00017-0. URL https://www.sciencedirect.com/science/article/pii/B9780128150658000170.
290
+ Sasan Iman and Massoud Pedram. *Logic Synthesis for Low Power VLSI Designs*. Kluwer Academic Publishers, USA, 1997. ISBN 0792380762.
291
+ Olga Kupriianova, Christoph Lauter, and Jean-Michel Muller. Radix conversion for ieee754-2008 mixed radix floating-point arithmetic. 2013 Asilomar Conference on Signals, Systems and Computers, Nov 2013. doi: 10.1109/acssc.2013.6810471. URL http://dx.doi.org/10.1109/ACSSC.2013.6810471.
292
+ Yuhang Li, Xin Dong, and Wei Wang. Additive powers-of-two quantization: An efficient non-uniform discretization for neural networks. In ICLR, 2020.
293
+ Tailin Liang, C. John Glossner, Lei Wang, and Shaobo Shi. Pruning and quantization for deep neural network acceleration: A survey. ArXiv, abs/2101.09671, 2021.
294
+ Daisuke Miyashita, Edward H. Lee, and Boris Murmann. Convolutional neural networks using logarithmic data representation. ArXiv, abs/1603.01025, 2016.
295
+ Markus Nagel, Marios Fournarakis, Rana Ali Amjad, Yelysei Bondarenko, Mart van Baalen, and Tijmen Blankevoort. A white paper on neural network quantization. ArXiv, abs/2106.08295, 2021.
296
+ Yury Nahshan, Brian Chmiel, Chaim Baskin, Evgenii Zheltonozhskii, Ron Banner, Alex M. Bronstein, and Avi Mendelson. Loss aware post-training quantization. arXiv preprint arXiv:1911.07190, 2019. URL http://arxiv.org/abs/1911.07190.
297
+ Marc Ortiz, Adrián Cristal, Eduard Ayguade, and Marc Casas. Low-precision floating-point schemes for neural network training. ArXiv, abs/1804.05267, 2018.
298
+ Mohammad Rastegari, Vicente Ordonez, Joseph Redmon, and Ali Farhadi. Xnor-net: Imagenet classification using binary convolutional neural networks. In ECCV, 2016.
299
+ Charbel Sakr and Naresh R Shanbhag. Per-tensor fixed-point quantization of the back-propagation algorithm. 2019.
300
+ Charbel Sakr, Steve Dai, Rangharajan Venkatesan, Brian Zimmer, William J. Dally, and Brucek Khailany. Optimal clipping and magnitude-aware differentiation for improved quantization-aware training. In ICML, 2022.
301
+ Xiao Sun, Jungwook Choi, Chia-Yu Chen, Naigang Wang, Swagath Venkataramani, Vijayalakshmi Srinivasan, Xiaodong Cui, Wei Zhang, and Kailash Gopalakrishnan. Hybrid 8-bit floating point (hfp8) training and inference for deep neural networks. In NeurIPS, 2019.
302
+ Xiao Sun, Naigang Wang, Chia-Yu Chen, Jiamin Ni, A. Agrawal, Xiaodong Cui, Swagath Venkataramani, K. E. Maghraoui, V. Srinivasan, and K. Gopalakrishnan. Ultra-low precision 4-bit training of deep neural networks. In NeurIPS, 2020.
303
+ Naigang Wang, Jungwook Choi, Daniel Brand, Chia-Yu Chen, and K. Gopalakrishnan. Training deep neural networks with 8-bit floating point numbers. In NeurIPS, 2018.
304
+ Simon Wiedemann, Temesgen Mehari, Kevin Kepp, and W. Samek. Dithered backprop: A sparse and quantized backpropagation algorithm for more efficient deep neural network training. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 3096-3104, 2020.
305
+ Kai Zhong, Xuefei Ning, Guohao Dai, Zhenhua Zhu, Tianchen Zhao, Shulin Zeng, Yu Wang, and Huazhong Yang. Exploring the potential of low-bit training of convolutional neural networks. IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2022.
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:873e28a7dbfba3580050b1344ff561ece8ab780961a674f4b179a243b0c55d73
3
+ size 316937
accurateneuraltrainingwith4bitmatrixmultiplicationsatstandardformats/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4cf025ea9d3983ef1bb0a87a723004347b49e4fcf3f104b8573e186ab0c4a939
3
+ size 375896
achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae12d5ea1eb50906729cdc26c959cb7cd63b39e8c8eb84c4e8b7fe60620a654d
3
+ size 95765
achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6d53e927fd639e119181575f4a650a75e31f78cf20249598b7044fc0fba5654
3
+ size 114042
achievetheminimumwidthofneuralnetworksforuniversalapproximation/5fae6e40-789a-4c59-8190-108252522d3c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6f0a2d67342343357d1e1da493e694a365cb017700067c00bdb7d97032ad93a
3
+ size 914207
achievetheminimumwidthofneuralnetworksforuniversalapproximation/full.md ADDED
@@ -0,0 +1,472 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACHIEVE THE MINIMUM WIDTH OF NEURAL NETWORKS FOR UNIVERSAL APPROXIMATION
2
+
3
+ Yongqiang Cai*
4
+
5
+ Beijing Normal University
6
+
7
+ caiq.math@bnu.edu.cn
8
+
9
+ # ABSTRACT
10
+
11
+ The universal approximation property (UAP) of neural networks is fundamental for deep learning, and it is well known that wide neural networks are universal approximators of continuous functions within both the $L^p$ norm and the continuous/uniform norm. However, the exact minimum width, $w_{\mathrm{min}}$ , for the UAP has not been studied thoroughly. Recently, using a decoder-memorizer-encoder scheme, Park et al. (2021) found that $w_{\mathrm{min}} = \max(d_x + 1, d_y)$ for both the $L^p$ -UAP of ReLU networks and the $C$ -UAP of ReLU+STEP networks, where $d_x, d_y$ are the input and output dimensions, respectively. In this paper, we consider neural networks with an arbitrary set of activation functions. We prove that both $C$ -UAP and $L^p$ -UAP for functions on compact domains share a universal lower bound of the minimal width; that is, $w_{\mathrm{min}}^* = \max(d_x, d_y)$ . In particular, the critical width, $w_{\mathrm{min}}^*$ , for $L^p$ -UAP can be achieved by leaky-ReLU networks, provided that the input or output dimension is larger than one. Our construction is based on the approximation power of neural ordinary differential equations and the ability to approximate flow maps by neural networks. The nonmonotone or discontinuous activation functions case and the one-dimensional case are also discussed.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ The study of the universal approximation property (UAP) of neural networks is fundamental for deep learning and has a long history. Early studies, such as Cybenko (1989); Hornik et al. (1989); Leshno et al. (1993), proved that wide neural networks (even shallow ones) are universal approximators for continuous functions within both the $L^p$ norm $(1 \leq p < \infty)$ and the continuous/uniform norm. Further research, such as Telgarsky (2016), indicated that increasing the depth can improve the expression power of neural networks. If the budgeted number of neurons is fixed, the deeper neural networks have better expression power Yarotsky & Zhevnerchuk (2020); Shen et al. (2022). However, this pattern does not hold if the width is below a critical threshold $w_{\mathrm{min}}$ . Lu et al. (2017) first showed that the ReLU networks have the UAP for $L^1$ functions from $\mathbb{R}^{d_x}$ to $\mathbb{R}$ if the width is larger than $d_x + 4$ , and the UAP disappears if the width is less than $d_x$ . Further research, Hanin & Sellke (2017); Kidger & Lyons (2020); Park et al. (2021), improved the minimum width bound for ReLU networks. Particularly, Park et al. (2021) revealed that the minimum width is $w_{\mathrm{min}} = \max(d_x + 1, d_y)$ for the $L^p(\mathbb{R}^{d_x}, \mathbb{R}^{d_y})$ UAP of ReLU networks and for the $C(\mathcal{K}, \mathbb{R}^{d_y})$ UAP of ReLU+STEP networks, where $\mathcal{K}$ is a compact domain in $\mathbb{R}^{d_x}$ .
16
+
17
+ For general activation functions, the exact minimum width $w_{\mathrm{min}}$ for UAP is less studied. Johnson (2019) considers uniformly continuous activation functions that can be approximated by a sequence of one-to-one functions and gives a lower bound $w_{\mathrm{min}} \geq d_x + 1$ for $C$ -UAP (means UAP for $C(\mathcal{K}, \mathbb{R}^{d_y})$ ). Kidger & Lyons (2020) consider continuous nonpolynomial activation functions and give an upper bound $w_{\mathrm{min}} \leq d_x + d_y + 1$ for $C$ -UAP. Park et al. (2021) improved the bound for $L^p$ -UAP (means UAP for $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ ) to $w_{\mathrm{min}} \leq \max(d_x + 2, d_y + 1)$ .
18
+
19
+ A summary of known upper/lower bounds on minimum width for the UAP can be found in Park et al. (2021).
20
+
21
+ In this paper, we consider neural networks having the UAP with arbitrary activation functions. We give a universal lower bound, $w_{\mathrm{min}} \geq w_{\mathrm{min}}^* = \max(d_x, d_y)$ , to approximate functions from a compact domain $\mathcal{K} \subset \mathbb{R}^{d_x}$ to $\mathbb{R}^{d_y}$ in the $L^p$ norm or continuous norm. Furthermore, we show that the critical width $w_{\mathrm{min}}^*$ can be achieved by many neural networks, as listed in Table 1. Surprisingly, the leaky-ReLU networks achieve the critical width for the $L^p$ -UAP provided that the input or output dimension is larger than one. This result relies on a novel construction scheme proposed in this paper based on the approximation power of neural ordinary differential equations (ODEs) and the ability to approximate flow maps by neural networks.
22
+
23
+ Table 1: Summary of the known minimum width of feed-forward neural networks that have the universal approximation property.
24
+
25
+ <table><tr><td>Functions</td><td>Activation</td><td>Minimum width</td><td>References</td></tr><tr><td>C(K, R)</td><td>ReLU</td><td>wmin = dx + 1</td><td>Hanin &amp; Sellke (2017)</td></tr><tr><td>Lp(Rdxdy)</td><td>ReLU</td><td>wmin = max(dx + 1, dy)</td><td>Park et al. (2021)</td></tr><tr><td>C([0, 1], R2)</td><td>ReLU</td><td>wmin = 3 = max(dx, dy) + 1</td><td>Park et al. (2021)</td></tr><tr><td>C(K, Rdy)</td><td>ReLU+STEP</td><td>wmin = max(dx + 1, dy)</td><td>Park et al. (2021)</td></tr><tr><td>Lp(K, Rdy)</td><td>Conti. nonpoly‡</td><td>wmin ≤ max(dx + 2, dy) + 1</td><td>Park et al. (2021)</td></tr><tr><td rowspan="3">Lp(K, Rdy)</td><td>Arbitrary</td><td>wmin ≥ max(dx, dy) := w*min</td><td>Ours (Lemma 1)</td></tr><tr><td>Leaky-ReLU</td><td>wmin = max(dx, dy, 2)</td><td>Ours (Theorem 2)</td></tr><tr><td>Leaky-ReLU+ABS</td><td>wmin = max(dx, dy)</td><td>Ours (Theorem 3)</td></tr><tr><td rowspan="3">C(K, Rdy)</td><td>Arbitrary</td><td>wmin ≥ max(dx, dy) := w*min</td><td>Ours (Lemma 1)</td></tr><tr><td>ReLU+FLOOR</td><td>wmin = max(dx, dy, 2)</td><td>Ours (Lemma 4)</td></tr><tr><td>UOE†+FLOOR</td><td>wmin = max(dx, dy)</td><td>Ours (Corollary 6)</td></tr><tr><td>C([0, 1], Rdy)</td><td>UOE†</td><td>wmin = dy</td><td>Ours (Theorem 5)</td></tr></table>
26
+
27
+ $\ddagger$ Continuous nonpolynomial $\rho$ that is continuously differentiable at some $z$ with $\rho'(z) \neq 0$ .
28
+ † UOE means the function having universal ordering of extrema, see Definition 7
29
+
30
+ # 1.1 CONTRIBUTIONS
31
+
32
+ 1) Obtained the universal lower bound of width $w_{\mathrm{min}}^*$ for feed-forward neural networks (FNNs) that have universal approximation properties.
33
+ 2) Achieved the critical width $w_{\mathrm{min}}^*$ by leaky-ReLU+ABS networks and UOE+FLOOR networks. (UOE is a continuous function which has universal ordering of extrema. It is introduced to handle $C$ -UAP for one-dimensional functions. See Definition 7.)
34
+ 3) Proposed a novel construction scheme from a differential geometry perspective that could deepen our understanding of UAP through topology theory.
35
+
36
+ # 1.2 RELATED WORK
37
+
38
+ To obtain the exact minimum width, one must verify the lower and upper bounds. Generally, the upper bounds are obtained by construction, while the lower bounds are obtained by counterexamples.
39
+
40
+ Lower bounds. For ReLU networks, Lu et al. (2017) utilized the disadvantage brought by the insufficient size of the dimensions and proved a lower bound $w_{\mathrm{min}} \geq d_x$ for $L^1$ -UAP; Hanin & Sellke (2017) considered the compactness of the level set and proved a lower bound $w_{\mathrm{min}} \geq d_x + 1$ for $C$ -UAP. For monotone activation functions or its variants, Johnson (2019) noticed that functions represented by networks with width $d_x$ have unbounded level sets, and Beise & Da Cruz (2020) noticed that such functions on a compact domain $\mathcal{K}$ take their maximum value on the boundary $\partial \mathcal{K}$ . These properties allow one to construct counterexamples and give a lower bound $w_{\mathrm{min}} \geq d_x + 1$ for $C$ -UAP. For general activation
41
+
42
+ functions, Park et al. (2021) used the volume of simplex in the output space and gave a lower bound $w_{\min} \geq d_y$ for either $L^p$ -UAP or $C$ -UAP. Our universal lower bound, $w_{\min} \geq \max(d_x, d_y)$ , is based on the insufficient size of the dimensions for both the input and output space, which combines the ideas from these references above.
43
+
44
+ Upper bounds. For ReLU networks, Lu et al. (2017) explicitly constructed a width- $(d_x + 4)$ network by concatenating a series of blocks so that the whole network can be approximated by scale functions in $L^1(\mathbb{R}^{d_x}, \mathbb{R})$ to any given accuracy. Hanin & Sellke (2017); Hanin (2019) constructed a width- $(d_x + d_y)$ network using the max-min string approach to achieve $C$ -UAP for functions on compact domains; Park et al. (2021) proposed an encoder-memorizer-decoder scheme that achieves the optimal bounds $w_{\mathrm{min}} = \max(d_x + 1, d_y)$ of the UAP for $L^p(\mathbb{R}^{d_x}, \mathbb{R}^{d_y})$ . For general activation functions, Kidger & Lyons (2020) proposed a register model construction that gives an upper bound $w_{\mathrm{min}} \leq d_x + d_y + 1$ for $C$ -UAP. Based on this result, Park et al. (2021) improved the upper bound to $w_{\mathrm{min}} \leq \max(d_x + 2, d_y + 1)$ for $L^p$ -UAP. In this paper, we adopt the encoder-memorizer-decoder scheme to calculate the universal critical width for $C$ -UAP by ReLU+FLOOR activation functions. However, the floor function is discontinuous. For $L^p$ -UAP, we reach the critical width by leaky-ReLU, which is a continuous network using a novel scheme based on the approximation power of neural ODEs.
45
+
46
+ ResNet and neural ODEs. Although our original aim is the UAP for feed-forward neural networks, our construction is related to the neural ODEs and residual networks (ResNet, He et al. (2016)), which include skipping connections. Many studies, such as E (2017); Lu et al. (2018); Chen et al. (2018), have emphasized that ResNet can be regarded as the Euler discretization of neural ODEs. The approximation power of ResNet and neural ODEs have also been examined by researchers. To list a few, Li et al. (2022) gave a sufficient condition that covers most networks in practice so that the neural ODE/dynamic systems (without extra dimensions) process $L^p$ -UAP for continuous functions, provided that the spatial dimension is larger than one; Ruiz-Balet & Zuazua (2021) obtained similar results focused on the case of one-hidden layer fields. Tabuada & Gharesifard (2020) obtained the $C$ -UAP for monotone functions, and for continuous functions it was obtained by adding one extra spatial dimension. Recently, Duan et al. (2022) noticed that the FNN could also be a discretization of neural ODEs, which motivates us to construct networks achieving the critical width by inheriting the approximation power of neural ODEs. For the excluded dimension one, we design an approximation scheme with leaky-ReLU+ABS and UOE activation functions.
47
+
48
+ # 1.3 ORGANIZATION
49
+
50
+ We formally state the main results and necessary notations in Section 2. The proof ideas are given in Sections 3, 4, and 5. In Section 3, we consider the case where $N = d_x = d_y = 1$ , which is basic for the high-dimensional cases. The construction is based on the properties of monotone functions. In Section 4, we prove the case where $N = d_x = d_y \geq 2$ . The construction is based on the approximation power of neural ODEs. In Section 5, we consider the case where $d_x \neq d_y$ and discuss the case of more general activation functions. Finally, we conclude the paper in Section 6. All formal proofs of the results are presented in the Appendix.
51
+
52
+ # 2 MAIN RESULTS
53
+
54
+ In this paper, we consider the standard feed-forward neural network with $N$ neurons at each hidden layer. We say that a $\sigma$ network with depth $L$ is a function with inputs $x\in \mathbb{R}^{d_x}$ and outputs $y\in \mathbb{R}^{d_y}$ , which has the following form:
55
+
56
+ $$
57
+ y \equiv f _ {L} (x) = W _ {L + 1} \sigma \left(W _ {L} \left(\dots \sigma \left(W _ {1} x + b _ {1}\right) + \dots\right) + b _ {L}\right) + b _ {L + 1}, \tag {1}
58
+ $$
59
+
60
+ where $b_{i}$ are bias vectors, $W_{i}$ are weight matrices, and $\sigma(\cdot)$ is the activation function. For the case of multiple activation functions, for instance, $\sigma_{1}$ and $\sigma_{2}$ , we call $f_{L}$ a $\sigma_{1} + \sigma_{2}$ network. In this situation, the activation function of each neuron is either $\sigma_{1}$ or $\sigma_{2}$ . In this paper, we consider arbitrary activation functions, while the following activation functions are emphasized: ReLU $(\max(x,0))$ , leaky-ReLU $(\max(x,\alpha x),\alpha \in (0,1)$ is a fixed positive
61
+
62
+ parameter), ABS $(|x|)$ , SIN $(\sin(x))$ , STEP $(1_{x > 0})$ , FLOOR $(\lfloor x \rfloor)$ and UOE (universal ordering of extrema, which will be defined later).
63
+
64
+ Lemma 1. For any compact domain $\mathcal{K} \subset \mathbb{R}^{d_x}$ and any finite set of activation functions $\{\sigma_i\}$ , the $\{\sigma_i\}$ networks with width $w < w_{\min}^* \equiv \max(d_x, d_y)$ do not have the UAP for both $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ and $C(\mathcal{K}, \mathbb{R}^{d_y})$ .
65
+
66
+ $L^p$ -UAP and $C$ -UAP. The lemma indicates that $w_{\min}^{*} \equiv \max(d_x, d_y)$ is a universal lower bound for the UAP in both $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ and $C(\mathcal{K}, \mathbb{R}^{d_y})$ . The main result of this paper illustrates that the minimal width $w_{\min}^{*}$ can be achieved. We consider the UAP for these two function classes, i.e., $L^p$ -UAP and $C$ -UAP, respectively. Note that any compact domain can be covered by a big cubic, the functions on the former can be extended to the latter, and the cubic can be mapped to the unit cubic by a linear function. This allows us to assume $\mathcal{K}$ to be a (unit) cubic without loss of generality.
67
+
68
+ # 2.1 $L^p$ -UAP
69
+
70
+ Theorem 2. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ , the minimum width of leaky-ReLU networks having $L^p$ -UAP is exactly $w_{\min} = \max(d_x, d_y, 2)$ .
71
+
72
+ The theorem indicates that leaky-ReLU networks achieve the critical width $w_{\mathrm{min}}^* = \max(d_x, d_y)$ , except for the case of $d_x = d_y = 1$ . The idea is to consider the case where $d_x = d_y = d > 1$ and let the network width equal $d$ . According to the results of Duan et al. (2022), leaky-ReLU networks can approximate the flow map of neural ODEs. Thus, we can use the approximation power of neural ODEs to finish the proof. Li et al. (2022) proved that many neural ODEs could approximate continuous functions in the $L^p$ norm. This is based on the fact that orientation preserving diffeomorphisms can approximate continuous functions Brenier & Gangbo (2003).
73
+
74
+ The exclusion of dimension one is because of the monotonicity of leaky ReLU. When we add a nonmonotone activation function such as the absolute value function or sine function, the $L^p$ -UAP at dimension one can be achieved.
75
+
76
+ Theorem 3. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ , the minimum width of leaky-ReLU+ABS networks having $L^p$ -UAP is exactly $w_{\min} = \max(d_x, d_y)$ .
77
+
78
+ # 2.2 $C$ -UAP
79
+
80
+ $C$ -UAP is more demanding than $L^p$ -UAP. However, if the activation functions could include discontinuous functions, the same critical width $w_{\mathrm{min}}^*$ can be achieved. Following the encoder-memorizer-decoder approach in Park et al. (2021), the step function is replaced by the floor function, and one can obtain the minimal width $w_{\mathrm{min}} = \max(d_x, 2, d_y)$ .
81
+
82
+ Lemma 4. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $C(\mathcal{K},\mathbb{R}^{d_y})$ , the minimum width of $\text{ReLU} + \text{FLOOR}$ networks having $C$ -UAP is exactly $w_{\min} = \max(d_x,2,d_y)$ .
83
+
84
+ Since ReLU and FLOOR are monotone functions, the $C$ -UAP critical width $w_{\mathrm{min}}^*$ does not hold for $C([0,1],\mathbb{R})$ . This seems to be the case even if we add ABS or SIN as an additional activation function. However, it is still possible to use the UOE function (Definition 7).
85
+
86
+ Theorem 5. The UOE networks with width $d_y$ have $C$ -UAP for functions in $C([0,1],\mathbb{R}^{d_y})$ .
87
+
88
+ Corollary 6. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the continuous function class $C(\mathcal{K},\mathbb{R}^{d_y})$ , the minimum width of $UOE + FLOOR$ networks having $C$ -UAP is exactly $w_{\min} = \max(d_x,d_y)$ .
89
+
90
+ # 3 APPROXIMATION IN DIMENSION ONE $(N = d_{x} = d_{y} = d = 1)$
91
+
92
+ In this section, we consider one-dimensional functions and neural networks with a width of one. In this case, the expression of ReLU networks is extremely poor. Therefore, we consider the leaky ReLU activation $\sigma_{\alpha}(x)$ with a fixed parameter $\alpha \in (0,1)$ . Note that leaky-ReLU is strictly monotonic, and it was proven by Duan et al. (2022) that any monotone function in
93
+
94
+ $C([0,1],\mathbb{R})$ can be uniformly approximated by leaky-ReLU networks with width one. This is useful for our construction to approximate nonmonotone functions. Since the composition of monotone functions is also a monotone function, to approximate nonmonotone functions we need to add a nonmonotone activation function.
95
+
96
+ Let us consider simple nonmonotone functions, such as $|x|$ or $\sin(x)$ . We show that leaky-ReLU+ABS or leaky-ReLU+SIN can approximate any continuous function $f^{*}(x)$ under the $L^p$ norm. The idea, shown in Figure 1, is that the target function $f^{*}(x)$ can be uniformly approximated by the polynomial $p(x)$ , which can be represented as the composition
97
+
98
+ $$
99
+ g \circ u (x) = p (x) \approx f ^ {*} (x).
100
+ $$
101
+
102
+ Here, the outer function $g(x)$ is any continuous function whose value at extrema matches the value at extrema of $p(x)$ , and the inner function $u(x)$ is monotonically increasing, which adjusts the location of the extrema (see Figure 1). Since polynomials have a finite number of extrema, the inner function $u(x)$ is piecewise continuous.
103
+
104
+ ![](images/4c7c6b8e6515f9277aa7e41fbe2fed653d937f1afad98b02b980ec5242f567f5.jpg)
105
+ Figure 1: Example of approximating/representing a polynomial by the composition of a monotonically increasing function $u(x)$ and a nonmonotone function $g(x)$ . (a) only matching the ordering of extrema values, (b) matching the values as well.
106
+
107
+ ![](images/8683328c65ac589fd988fda71fe7bc0c81aa8dcf2a14fb18d533af9fe9f49901.jpg)
108
+
109
+ For $L^p$ -UAP, the approximation is allowed to have a large deviation on a small interval; therefore, the extrema could not be matched exactly (over a small error). For example, we can choose $g(x)$ as the sine function or the sawtooth function (which can be approximated by ABS networks), and $u(x)$ is a leaky-ReLU network approximating $g^{-1} \circ p(x)$ at each monotone interval of $p$ . Figure 1(a) shows an example of the composition.
110
+
111
+ For $C$ -UAP, matching the extrema while keeping the error small is needed. To achieve this aim, we introduce the UOE functions.
112
+
113
+ Definition 7 (Universal ordering of extrema (UOE) functions). A UOE function is a continuous function in $C(\mathbb{R},\mathbb{R})$ such that any (finite number of) possible ordering(s) of values at the (finite) extrema can be found in the extrema of the function.
114
+
115
+ There are an infinite number of UOE functions. Here, we give an example, as shown in Figure 2. This UOE function $\rho(x)$ is defined by a sequence $\{o_i\}_{i=1}^{\infty}$ ,
116
+
117
+ $$
118
+ \rho (x) = \left\{ \begin{array}{l l} x / 4, & x \leq 0, \\ o _ {i} + (x - i) \left(o _ {i + 1} - o _ {i}\right), & x \in [ i, i + 1), \end{array} \right. \tag {2}
119
+ $$
120
+
121
+ where $\{o_i\}_{i=1}^{\infty} = (1,2,2,1,1,2,3,1,3,2,2,1,3,3,2,3,1,3,1,2,3,2,1,1,2,3,4,\ldots)$ is the concatenation of all permutations of positive integer numbers. The term UOE in this paper means this function $\rho$ . Since the UOE function $\rho(x)$ can represent leaky-ReLU $\sigma_{1/4}$ on any finite interval, this implies that the UOE networks can uniformly approximate any monotone functions.
122
+
123
+ To illustrate the $C$ -UAP of UOE networks, we only need to construct a continuous function $g(x)$ matching the extrema of $p(x)$ (see Figure 1(b)). That is, construct $g(x)$ by the composition $\tilde{u} \circ \rho(x)$ , where $\tilde{u}(x)$ is a monotone and continuous function. This is possible since the UOE function contains any ordering of the extrema.
124
+
125
+ The following lemma summarizes the approximation of one-dimensional functions. As a consequence, Theorem 5 holds since functions in $C([0,1],\mathbb{R}^{d_y})$ can be regarded as $d_{y}$ one-dimensional functions.
126
+
127
+ ![](images/ccbccea36001766fa936f977f204461b7f9cb389a65fceaf0024de229d80e7df.jpg)
128
+ Figure 2: An example of the UOE function $\rho(x)$ , which has an infinite number of pieces.
129
+
130
+ Lemma 8. For any function $f^{*}(x) \in C[0,1]$ and $\varepsilon > 0$ , there is a leaky-ReLU+ABS (or leaky-ReLU+SIN) network with width one and depth $L$ such that $\int_0^1 |f^* (x) - f_L(x)|^p dx < \varepsilon^p$ . There is a leaky-ReLU+UOE network with a width of one and a depth of $L$ such that $|f^{*}(x) - f_{L}(x)| < \varepsilon, \forall x \in [0,1]$ .
131
+
132
+ # 4 CONNECTION TO THE NEURAL ODES $(N = d_{x} = d_{y} = d\geq 2)$
133
+
134
+ Now, we turn to the high-dimensional case and connect the feed-forward neural networks to neural ODEs. To build this connection, we assume that the input and output have the same dimension, $d_x = d_y = d$ .
135
+
136
+ Consider the following neural ODE with one-hidden layer neural fields:
137
+
138
+ $$
139
+ \left\{ \begin{array}{l} \dot {x} (t) = v (x (t), t) := A (t) \tanh (W (t) x (t) + b (t)), t \in (0, \tau), \\ x (0) = x _ {0}, \end{array} \right. \tag {3}
140
+ $$
141
+
142
+ where $x, x_0 \in \mathbb{R}^d$ and the time-dependent parameters $(A, W, b) \in \mathbb{R}^{d \times d} \times \mathbb{R}^{d \times d} \times \mathbb{R}^d$ are piecewise constant functions of $t$ . The flow map is denoted as $\phi^\tau(\cdot)$ , which is the function from $x_0$ to $x(\tau)$ . According to the approximation results of neural ODEs (see Li et al. (2022); Tabuada & Gharesifard (2020); Ruiz-Balet & Zuazua (2021) for examples), we have the following lemma.
143
+
144
+ Lemma 9 (Special case of Li et al. (2022)). Let $d \geq 2$ . Then, for any continuous function $f^{*}:\mathbb{R}^{d}\to \mathbb{R}^{d}$ , any compact set $\kappa \subset \mathbb{R}^d$ , and any $\varepsilon >0$ , there exist a time $\tau \in \mathbb{R}^{+}$ and a piecewise constant input $(A,W,b):[0,\tau ]\to \mathbb{R}^{d\times d}\times \mathbb{R}^{d\times d}\times \mathbb{R}^{d}$ so that the flow-map $\phi^{\tau}$ associated with the neural ODE (3) satisfies: $\| f^{*} - \phi^{\tau}\|_{L^{p}(\mathcal{K})}\leq \varepsilon$
145
+
146
+ Next, we consider the approximation of the flow map associated with (3) by neural networks. Recently, Duan et al. (2022) found that leaky-ReLU networks could perform such approximations.
147
+
148
+ Lemma 10 (Theorem 2.2 in Duan et al. (2022)). If the parameters $(A,W,b)$ in (3) are piecewise constants, then for any compact set $\mathcal{K}$ and any $\varepsilon >0$ , there is a leaky-ReLU network $f_{L}(x)$ with width $d$ and depth $L$ such that
149
+
150
+ $$
151
+ \left\| \phi^ {\tau} (x) - f _ {L} (x) \right\| \leq \varepsilon , \forall x \in \mathcal {K}. \tag {4}
152
+ $$
153
+
154
+ Combining these two lemmas, one can directly prove the following corollary, which is a part of our Theorem 2.
155
+
156
+ Corollary 11. Let $\mathcal{K} \subset \mathbb{R}^d$ be a compact set and $d \geq 2$ ; then, for the function class $L^p(\mathcal{K}, \mathbb{R}^d)$ , the leaky-ReLU networks with width $d$ have $L^p$ -UAP.
157
+
158
+ Here, we summarize the main ideas of this result. Let us start with the discretization of the ODE by the splitting approach (see McLachlan & Quispel (2002) for example). Consider the splitting of (3) with $v(x,t) = \sum_{i,j}v_i^{(j)}(x,t)e_j$ , where $v_i^{(j)}(x,t) = A_{ji}(t)\tanh (W_{i,:}(t)x + b_i(t))$ is a scalar function and $e_j$ is the $j$ -th axis unit vector. Then for a given time step $\Delta t = \tau /K$ , ( $K$ large enough), the splitting method gives the following iteration of $x_k$ which approximates $\phi^{k\Delta t}(x_0)$ ,
159
+
160
+ $$
161
+ x _ {k + 1} = T _ {k} ^ {(d, d)} \circ \dots \circ T _ {k} ^ {(1, 2)} \circ T _ {k} ^ {(1, 1)} x _ {k}, \tag {5}
162
+ $$
163
+
164
+ where the map $T_{k}^{(i,j)}: x \to y$ is defined as
165
+
166
+ $$
167
+ \left\{ \begin{array}{l} y ^ {(l)} = x ^ {(l)}, l \neq j, \\ y ^ {(j)} = x ^ {(j)} + \Delta t v _ {i} ^ {(j)} (x, k \Delta t) = x ^ {(j)} + a \Delta t \tanh (w x + \beta). \end{array} \right. \tag {6}
168
+ $$
169
+
170
+ Here the superscript in $x^{(l)}$ means the $l$ -th coordinate of $x$ , and $a = A_{ji}$ , $w = W_{i,:}$ , and $\beta = b_i$ take their values at $t = k\Delta t$ . Note that the scalar functions $\tanh(\xi)$ and $\xi + a\Delta t \tanh(\xi)$ are monotone with respect to $\xi$ when $\Delta t$ is small enough. This allows us to construct leaky-ReLU networks with width $d$ to approximate each map $T_k^{(i,j)}$ and then approximate the flow-map, $\phi^\tau(x_0) \approx x_K$ .
171
+
172
+ Note that Lemma 10 holds for all dimensions, while Lemma 9 holds for dimensions larger than one. This is because flow maps are orientation-preserving diffeomorphisms, and they can approximate continuous functions only for dimensions larger than one; see Brenier & Gangbo (2003). The approximation is based on control theory where the flow map can be adjusted to match any finite set of input-output pairs. This match does not hold for dimension one. However, the case of dimension one is discussed in the last section.
173
+
174
+ # 5 ACHIEVING THE MINIMAL WIDTH
175
+
176
+ Now, we turn to the cases where the input and output dimensions cannot be equal.
177
+
178
+ # 5.1 UNIVERSAL LOWER BOUND $w_{\mathrm{min}}^* = \max (d_x, d_y)$
179
+
180
+ Here, we give a sketch of the proof of Lemma 1, which states that $w_{\mathrm{min}}^*$ is a universal lower bound over all activation functions. Parts of Lemma 1 have been demonstrated in many papers, such as Park et al. (2021). Here, we give proof by two counterexamples that are simple and easy to understand from the topological perspective. It contains two cases: 1) there is a function $f^*$ that cannot be approximated by networks with width $w \leq d_x - 1$ ; 2) there is a function $f^*$ that cannot be approximated by networks with width $w \leq d_y - 1$ . Figure 3(a)-(b) shows the counterexamples that illustrate the essence of the proof.
181
+
182
+ For the first case, $w \leq d_x - 1$ , we show that $f^{*}(x) = \| x\|^{2}, x \in \mathcal{K} = [-2,2]^{d_{x}}$ , is what we want; see Figure 3(a). In fact, we can relax the networks to a function $f(x) = \phi (Wx + b)$ where $Wx + b$ is a transformer from $\mathbb{R}^{d_x}$ to $\mathbb{R}^{d_x - 1}$ and $\phi (x)$ could be any function. A consequence is that there exists a direction $v$ (set as the vector satisfying $Wv = 0$ , $\| v\| = 1$ ) such that $f(x) = f(x + \lambda v)$ for all $\lambda \in \mathbb{R}$ . Then, considering the sets $A = \{x:\| x\| \leq 0.1\}$ and $B = \{x:\| x - v\| \leq 0.1\}$ , we have
183
+
184
+ $$
185
+ \begin{array}{l} \int_{\mathcal{K}} |f(x) - f^{*}(x)| \, dx \geq \int_{A} |f(x) - f^{*}(x)| \, dx + \int_{B} |f(x) - f^{*}(x)| \, dx \\ \geq \int_{A} \left(|f(x) - f^{*}(x)| + |f(x + v) - f^{*}(x + v)|\right) dx \\ \geq \int_{A} \left|f^{*}(x) - f^{*}(x + v)\right| \, dx \geq 0.8 |A|. \end{array}
186
+ $$
187
+
188
+ Since the volume of $A$ is a fixed positive number, the inequality implies that even the $L^1$ approximation for $f^*$ is impossible. The case of the $L^p$ norm and the uniform norm is impossible as well.
189
+
190
+ For the second case, $w \leq d_y - 1$ , we show the example of $f^*$ , which is the parametrized curve from $\mathbf{0}$ to $\mathbf{1}$ along the edge of the cubic, see Figure 3(b). Relaxing the networks to a function $f(x) = W\psi(x) + b$ , $\psi(x)$ could be any function. Since the range of $f$ is in a hyperplane while $f^*$ has a positive distance to any hyperplane, the target $f^*$ cannot be approximated.
191
+
192
+ # 5.2 ACHIEVING $w_{\mathrm{min}}^*$ FOR $L^p$ -UAP
193
+
194
+ Now, we show that the lower bound $w_{\mathrm{min}}^*$ for $L^p$ -UAP can be achieved by leaky-ReLU+ABS networks. Without loss of generality, we consider $\mathcal{K} = [0,1]^{d_x}$ .
195
+
196
+ For any function $f^{*}$ in $L^{p}([0,1]^{d_{x}},\mathbb{R}^{d_{y}})$ , we can extend it to a function $\tilde{f}^{*}$ in $L^{p}([0,1]^{d},\mathbb{R}^{d})$ by filling in zeros where $d = \max (d_x,d_y) = w_{\mathrm{min}}^*$ . When $d_{x} > 1$ or $d_{y} > 1$ , the $L^p$ -UAP for leaky-ReLU networks with width $w_{\mathrm{min}}^{*}$ is obtained by using Corollary 11. Recall that by the Lemma 1, $w_{\mathrm{min}}^{*}$ is optimal, and we obtain our main result Theorem 2.
197
+
198
+ Combining the case of $d_x = d_y = d = 1$ in Section 3, adding absolute function ABS as an additional activation function, we obtain Theorem 3.
199
+
200
+ # 5.3 ACHIEVING $w_{\mathrm{min}}^*$ FOR $C$ -UAP
201
+
202
+ Here, we use the encoder-memorizer-decoder approach proposed in Park et al. (2021) to achieve the minimum width. Without loss of generality, we consider the function class $C([0,1]^{d_x}, [0,1]^{d_y})$ . The encoder-memorizer-decoder approach includes three parts:
203
+
204
+ 1) an encoder maps $[0,1]^{d_x}$ to $[0,1]$ which quantizes each coordinate of $x$ by a $K$ -bit binary representation and concatenates the quantized coordinates into a single scalar value $\bar{x}$ having a $(d_xK)$ -bit binary representation;
205
+ 2) a memorizer maps each codeword $\bar{x}$ to its target codeword $\bar{y}$ ;
206
+ 3) a decoder maps $\bar{y}$ to the quantized target that approximates the true target.
207
+
208
+ As illustrated in Figure 3(c), using the floor function instead of a step function, one can construct the encoder by FLOOR networks with width $d_x$ and the decoder by FLOOR networks with width $d_y$ . The memorizer is a one-dimensional scalar function that can be approximated by ReLU networks with a width of two or UOE networks with a width of one. Therefore, the minimal widths $\max(d_x, 2, d_y)$ and $\max(d_x, d_y)$ are obtained, which demonstrate Lemma 4 and Corollary 6, respectively.
209
+
210
+ ![](images/0e063df62b359822028c0b8bb401c68906a4dad5e039db06a31402803d8305db.jpg)
211
+ Figure 3: (a)(b) Counterexamples for proving Lemma 1. (a) Points $A$ and $B$ on a level set of networks $f(x); f(A) = f(B)$ but $f^{*}(A) - f^{*}(B)$ is not small. (b) The curve from 0 to 1 along the edge of the cubic has a positive distance to any hyperplane. (c) illustration of the encoder-memorizer-decoder scheme for $C$ -UAP by an example where $d_{x} = d_{y} = 3$ , 4 bits for the input and 5 bits for the output.
212
+
213
+ # 5.4 EFFECT OF THE ACTIVATION FUNCTIONS
214
+
215
+ Here, we emphasize that our universal bound of the minimal width is optimized over arbitrary activation functions. However, it cannot always be achieved when the activation functions are fixed. Here, we discuss the case of monotone activation functions.
216
+
217
+ If the activation functions are strictly monotone and continuous (such as leaky-ReLU), a width of at least $d_x + 1$ is needed for $C$ -UAP. This can be understood through topology theory. Leaky-ReLU, any nonsingular linear transformation, and their inverses are continuous and homeomorphic. Since compositions of homeomorphisms are also homeomorphisms, we have the following proposition: If $N = d_x = d_y = d$ and the weight matrices in leaky-ReLU networks are nonsingular, then the input-output map is a homeomorphism. Note that singular matrices can be approximated by nonsingular matrices; therefore, we can restrict the weight matrices in neural networks to the nonsingular case.
218
+
219
+ When $d_x \geq d_y$ , we can reformulate the leaky-ReLU network as $f_L(x) = W_{L + 1}\psi (x) + b_{L + 1}$ where $\psi (x)$ is the homeomorphism. Note that considering the case where $d_y = 1$ is sufficient,
220
+
221
+ according to Hanin & Sellke (2017); Johnson (2019). They proved that the neural network width $d_{x}$ cannot approximate any scalar function with a level set containing a bounded path component. This can be easily understood from the perspective of topology theory. An example is to consider the function $f^{*}(x) = \| x\|^{2}, x \in \mathcal{K} = [-2,2]^{d_{x}}$ shown in Figure 4.
222
+
223
+ ![](images/0dd65702c4e64c27de8751d0bca33b4d18ea8c28a25aa68b28f7ac9bb2c74b13.jpg)
224
+ Figure 4: Illustrating the possibility of UAP when $N = d_{x}$ . (a) Plot of $f^{*}(x) = \| x\|^{2}$ and its contour at $\| x\| = 1$ . (b) The original point $P$ is an inner point of the unit ball, while its image is a boundary point, which is impossible for homeomorphisms. (c) Any homeomorphism, approximating $\| x\|^{2}$ with error less than $\varepsilon$ (=0.1 for example) on $\Gamma$ , should have error larger than $1 - \varepsilon$ (=0.9) at $P$ . (d) Approximating $f^{*}$ in $L^p$ is possible by leaving a small region.
225
+
226
+ ![](images/8887623b6fe236076b1f22f65fa1865680e02f0f84b555880b6f64d447c48cdc.jpg)
227
+
228
+ ![](images/4803804f8608cb720c23a60d81118d1fc7a058a3b6f24d712fbb2794c5235d46.jpg)
229
+
230
+ ![](images/6830eb7d94bda368a9cf0ce2b5f79d0192a338bf01b737a6b1a005cb8d8e6280.jpg)
231
+
232
+ The case where $d_x < d_y$ . We present a simple example in Figure 5. The curve '4' corresponding to a continuous function from $[0,1] \subset \mathbb{R}$ to $\mathbb{R}^2$ cannot be uniformly approximated. However, the $L^p$ approximation is still possible.
233
+
234
+ ![](images/f8992866f213eb58bac323712214b4f84b60aa51eb50f5df52d2938ef273f630.jpg)
235
+ Figure 5: Illustrating the possibility of $C$ -UAP when $d_x \leq d_y$ . The curve in (a) is homeomorphic to the interval [0,1], while the curve '4' in (b) is not and cannot be approximated uniformly by homeomorphisms. The $L^p$ approximation is possible via (a).
236
+
237
+ ![](images/860ff61f0267c461f0213d7f0a69d89f36b071ca88801400cdb328eb3d38c2ec.jpg)
238
+
239
+ ![](images/3da0d82fd8ea663d3d51735f9785203c8311398637b67c0e5d5522d1fd42a8f5.jpg)
240
+
241
+ # 6 CONCLUSION
242
+
243
+ Let us summarize the main results and implications of this paper. After giving the universal lower bound of the minimum width for the UAP, we proved that the bound is optimal by constructing neural networks with some activation functions.
244
+
245
+ For the $L^p$ -UAP, our construction to achieve the critical width was based on the approximation power of neural ODEs, which bridges the feed-forward networks to the flow maps corresponding to the ODEs. This allowed us to understand the UAP of the FNN through topology theory. Moreover, we obtained not only the lower bound but also the upper bound.
246
+
247
+ For the $C$ -UAP, our construction was based on the encoder-memorizer-decoder approach in Park et al. (2021), where the activation sets contain a discontinuous function $\lfloor x \rfloor$ . It is still an open question whether we can achieve the critical width by continuous activation functions. Johnson (2019) proved that continuous and monotone activation functions need at least width $d_x + 1$ . This implies that nonmonotone activation functions are needed. By using the UOE activation, we calculated the critical width for the case of $d_x = 1$ . It would be of interest to study the case of $d_x \geq 2$ in future research.
248
+
249
+ We remark that our UAP is for functions on a compact domain. Examining the critical width of the UAP for functions on unbounded domains is desirable for future research.
250
+
251
+ # ACKNOWLEDGMENTS
252
+
253
+ We thank anonymous reviewers for their valuable comments and useful suggestions. This research is supported by the National Natural Science Foundation of China (Grant No. 12201053).
254
+
255
+ # REFERENCES
256
+
257
+ Hans-Peter Beise and Steve Dias Da Cruz. Expressiveness of neural networks having width equal or below the input dimension. arXiv preprint arXiv:2011.04923, 2020.
258
+ Yann Brenier and Wilfrid Gangbo. $L^p$ approximation of maps by diffeomorphisms. *Calculus of Variations and Partial Differential Equations*, 16(2):147-164, 2003.
259
+ Ricky TQ Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equations. Advances in neural information processing systems, 31, 2018.
260
+ George Cybenko. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems, 2(4):303-314, 1989.
261
+ Yifei Duan, Li'ang Li, Guanghua Ji, and Yongqiang Cai. Vanilla feedforward neural networks as a discretization of dynamic systems. arXiv preprint arXiv:2209.10909, 2022.
262
+ Weinan E. A proposal on machine learning via dynamical systems. Communications in Mathematics and Statistics, 5(1):1-11, 2017.
263
+ Boris Hanin. Universal function approximation by deep neural nets with bounded width and relu activations. Mathematics, 7(10):992, 2019.
264
+ Boris Hanin and Mark Sellke. Approximating continuous functions by relu nets of minimal width. arXiv preprint arXiv:1710.11278, 2017.
265
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
266
+ Kurt Hornik, Maxwell Stinchcombe, and Halbert White. Multilayer feedforward networks are universal approximators. Neural Networks, 2(5):359-366, 1989.
267
+ Jesse Johnson. Deep, Skinny Neural Networks are not Universal Approximators. In International Conference on Learning Representations, 2019.
268
+ Patrick Kidger and Terry Lyons. Universal approximation with deep narrow networks. In Conference on learning theory, pp. 2306-2327. PMLR, 2020.
269
+ Moshe Leshno, Vladimir Ya. Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural Networks, 6(6):861-867, 1993.
270
+ Qianxiao Li, Ting Lin, and Zuowei Shen. Deep learning via dynamical systems: An approximation perspective. Journal of the European Mathematical Society, 2022.
271
+ Yiping Lu, Aoxiao Zhong, Quanzheng Li, and Bin Dong. Beyond finite layer neural networks: Bridging deep architectures and numerical differential equations. In International Conference on Machine Learning, pp. 3276-3285. PMLR, 2018.
272
+ Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. In Advances in Neural Information Processing Systems, volume 30, 2017.
273
+ Robert I. McLachlan and G. Reinout W. Quispel. Splitting methods. Acta Numerica, 11: 341-434, 2002.
274
+
275
+ Sejun Park, Chulhee Yun, Jaeho Lee, and Jinwoo Shin. Minimum Width for Universal Approximation. In International Conference on Learning Representations, 2021.
276
+ Domenec Ruiz-Balet and Enrique Zuazua. Neural ode control for classification, approximation and transport. arXiv preprint arXiv:2104.05278, 2021.
277
+ Zuowei Shen, Haizhao Yang, and Shijun Zhang. Optimal approximation rate of ReLU networks in terms of width and depth. Journal de Mathématiques Pures et Appliquées, 157:101-135, 2022.
278
+ Paulo Tabuada and Bahman Gharesifard. Universal approximation power of deep residual neural networks via nonlinear control theory. arXiv preprint arXiv:2007.06007, 2020.
279
+ Matus Telgarsky. Benefits of Depth in Neural Networks. In Conference on Learning Theory, pp. 1517-1539. PMLR, 2016.
280
+ Dmitry Yarotsky and Anton Zhevnerchuk. The phase diagram of approximation rates for deep neural networks. Advances in neural information processing systems, 33:13005-13015, 2020.
281
+
282
+ # A PROOF OF THE LEMMAS
283
+
284
+ # A.1 PROOF OF LEMMA 8
285
+
286
+ We give a definition and a lemma below that are useful for proving Lemma 8.
287
+
288
+ Definition 12. We say two functions, $f_1, f_2 \in C(\mathbb{R},\mathbb{R})$ , have the same ordering of extrema if they have the following properties:
289
+
290
+ 1) $f_{i}(x)$ has only a finite number of extrema that are (increasing) $x_{i,j}^{*}, j = 1,2,\ldots,m_{i}$ .
291
+ 2) $m_{1} = m_{2} \eqqcolon m$ and the two sequences,
292
+
293
+ $$
294
+ S _ {1} := \{f _ {1} (- \infty), f _ {1} (x _ {1, 1} ^ {*}), \dots , f _ {1} (x _ {1, m} ^ {*}), f _ {1} (+ \infty) \},
295
+ $$
296
+
297
+ and
298
+
299
+ $$
300
+ S _ {2} := \left\{f _ {2} (- \infty), f _ {2} \left(x _ {2, 1} ^ {*}\right), \dots , f _ {2} \left(x _ {2, m} ^ {*}\right), f _ {2} (+ \infty) \right\},
301
+ $$
302
+
303
+ have the same ordering, i.e.,
304
+
305
+ $$
306
+ S _ {1, i} < S _ {1, j} \Longleftrightarrow S _ {2, i} < S _ {2, j}, \quad \forall i, j,
307
+ $$
308
+
309
+ $$
310
+ S _ {1, i} = S _ {1, j} \Longleftrightarrow S _ {2, i} = S _ {2, j}, \quad \forall i, j.
311
+ $$
312
+
313
+ Lemma 13. Let $f_{1}$ and $f_{2}$ be continuous functions in $C(\mathbb{R},\mathbb{R})$ that have the same ordering of extrema; then, there are two strictly monotone functions, $v$ and $u$ , such that
314
+
315
+ $$
316
+ f _ {1} = v \circ f _ {2} \circ u.
317
+ $$
318
+
319
+ Proof. Here, we use the same notation in Definition 12. The functions $v$ and $u$ can be constructed as follows.
320
+
321
+ (1) Construct the outer function $v$ that tries to match the function values at the extrema. The only requirement is that
322
+
323
+ $$
324
+ S _ {1, i} = v (S _ {2, i}), \quad \forall i.
325
+ $$
326
+
327
+ Since $S_{1}$ and $S_{2}$ have the same ordering, it is easy to construct such a function $v$ that is continuous and strictly increasing, for example, piecewise linear.
328
+
329
+ (2) Construct the inner function $u$ to match the location of the extrema. Denote $g = v \circ f_2$ , which satisfies $f_1(x_{1,i}^*) = g(x_{2,i}^*)$ . Since $f_1$ and $g$ are strictly monotone and continuous on the intervals $I_i := (x_{1,i}^*, x_{1,i+1}^*)$ and $J_i := (x_{2,i}^*, x_{2,i+1}^*)$ , respectively, we can construct the function $u$ on $I_i$ as
330
+
331
+ $$
332
+ u (x) = g ^ {- 1} (f _ {1} (x)), x \in I _ {i}.
333
+ $$
334
+
335
+ Combining each piece of $u$ , we have a strictly increasing and continuous function $u$ on the whole space $\mathbb{R}$ . As a consequence, we have $f_{1} = g\circ u = v\circ f_{2}\circ u$ .
336
+
337
+ ![](images/0956560855dcc88a92a99699da2579178f170d4ca104b33370915ee7534292e4.jpg)
338
+
339
+ Lemma 8. For any function $f^{*}(x) \in C[0,1]$ and $\varepsilon > 0$ ,
340
+
341
+ 1) there is a leaky-ReLU+ABS (or leaky-ReLU+SIN) network with width one and depth $L$ such that $\int_0^1 |f^* (x) - f_L(x)|^p dx < \varepsilon^p$ .
342
+ 2) there is a leaky-ReLU + UOE network width one and depth $L$ such that $|f^{*}(x) - f_{L}(x)| < \varepsilon, \forall x \in [0,1]$ .
343
+
344
+ Proof. We mainly provide proof of the second point, while the first point can be proven using the same scheme.
345
+
346
+ For any function $f^{*}(x) \in C([0,1],\mathbb{R})$ and $\varepsilon > 0$ , we can approximate it by a polynomial $p_n(x)$ with order $n$ such that
347
+
348
+ $$
349
+ | f ^ {*} (x) - p _ {n} (x) | \leq \varepsilon / 2, \quad \forall x \in [ 0, 1 ],
350
+ $$
351
+
352
+ according to the well-known Weierstrass approximation theorem. Without a loss of generality, we can assume that $p_n(x)$ is not the same at all of its extrema. Then, we can represent $p_n(x)$ by the following composition, using Lemma 13 and the property of UOE:
353
+
354
+ $$
355
+ p _ {n} (x) = v \circ \rho \circ u (x), \tag {7}
356
+ $$
357
+
358
+ where $\rho(x)$ is the UOE function (2) and $v(x)$ and $u(x)$ are monotonically increasing continuous functions.
359
+
360
+ Then, we can approximate $p_n(x)$ by UOE networks. Since $v(x)$ and $u(x)$ are monotone, there are UOE networks $\tilde{v}(x)$ and $\tilde{u}(x)$ such that $\| v - \tilde{v} \|$ and $\| u - \tilde{u} \|$ are arbitrarily small. Hence, there is a UOE network $f_L(x) = \tilde{v} \circ \rho \circ \tilde{u}(x)$ that can approximate $p_n(x)$ such that
361
+
362
+ $$
363
+ \left| p _ {n} (x) - f _ {L} (x) \right| \leq \varepsilon / 2, \quad \forall x \in [ 0, 1 ],
364
+ $$
365
+
366
+ which implies that
367
+
368
+ $$
369
+ \left| f ^ {*} (x) - f _ {L} (x) \right| \leq \varepsilon .
370
+ $$
371
+
372
+ This completes the proof of the second point.
373
+
374
+ For the first point, we only emphasize that it is easy to construct a function $f(x)$ that has the same local maximum and local minimum in the interval and has $\| f - f^{*}\|_{L^p}$ small enough. This $f(x)$ has the same ordering of extrema as the sawtooth function (or sine) and hence can be uniformly approximated by leaky-ReLU+ABS (or leaky-ReLU+SIN) networks $f_{L}$ . As a consequence, $\| f_{L} - f^{*}\|_{L^{p}}$ is small enough.
375
+
376
+ # A.2 PROOF OF LEMMA 9
377
+
378
+ Lemma 9. Let $d \geq 2$ . Then, for any continuous function $f^{*}: \mathbb{R}^{d} \to \mathbb{R}^{d}$ , any compact set $\mathcal{K} \subset \mathbb{R}^{d}$ , and any $\varepsilon > 0$ , there exist a time $\tau \in \mathbb{R}^{+}$ and a piecewise constant input $(A, W, b): [0, \tau] \to \mathbb{R}^{d \times d} \times \mathbb{R}^{d \times d} \times \mathbb{R}^{d}$ so that the flow map $\phi^{\tau}$ associated with the neural ODE (3) satisfies: $\| f^{*} - \phi^{\tau}\|_{L^{p}(\mathcal{K})} \leq \varepsilon$ .
379
+
380
+ Proof. This is a special case of Theorem 2.3 in Li et al. (2022).
381
+
382
+ ![](images/8224ba8744fcf26b61b6cdaf8fa5dc5e8c183a0b29364fa6c9ebf913bb463914.jpg)
383
+
384
+ # A.3 PROOF OF LEMMA 10
385
+
386
+ Lemma 10. If the parameters $(A,W,b)$ in (3) are piecewise constants, then for any compact set $\mathcal{K}$ and any $\varepsilon >0$ , there is a leaky-ReLU network $f_{L}(x)$ with width $d$ and depth $L$ such that
387
+
388
+ $$
389
+ \left\| \phi^ {\tau} (x) - f _ {L} (x) \right\| \leq \varepsilon , \forall x \in \mathcal {K}. \tag {8}
390
+ $$
391
+
392
+ Proof. It is Theorem 2.2 in Duan et al. (2022).
393
+
394
+ ![](images/da41d1423a2c41d70bb7e9eb49371ba8c92bb4d968ff4d91e9b87274e4ae75d3.jpg)
395
+
396
+ # A.4 PROOF OF COROLLARY 11
397
+
398
+ Corollary 11. Let $\mathcal{K} \subset \mathbb{R}^d$ be a compact set and $d \geq 2$ ; then, for the function class $L^p(\mathcal{K}, \mathbb{R}^d)$ , the leaky-ReLU networks with width $d$ have $L^p$ -UAP.
399
+
400
+ Proof. For any $f^{*}(x) \in L^{p}(\mathcal{K},\mathbb{R}^{d})$ and $\varepsilon > 0$ , there is a flow map $\phi^{\tau}(x)$ associated with the neural ODE (3) such that (according to Lemma 9)
401
+
402
+ $$
403
+ \left\| f ^ {*} (\cdot) - \phi^ {\tau} (\cdot) \right\| _ {L ^ {p}} \leq \frac {\varepsilon}{2}.
404
+ $$
405
+
406
+ Then, employing Lemma 10, there is a leaky-ReLU network $f_{L}$ such that
407
+
408
+ $$
409
+ \left\| f _ {L} (\cdot) - \phi^ {\tau} (\cdot) \right\| _ {L ^ {p}} \leq \frac {\varepsilon}{2}.
410
+ $$
411
+
412
+ Therefore, we have
413
+
414
+ $$
415
+ \left\| f _ {L} (\cdot) - f ^ {*} (\cdot) \right\| _ {L ^ {p}} \leq \left\| f ^ {*} (\cdot) - \phi^ {\tau} (\cdot) \right\| _ {L ^ {p}} + \left\| f _ {L} (\cdot) - \phi^ {\tau} (\cdot) \right\| _ {L ^ {p}} \leq \varepsilon .
416
+ $$
417
+
418
+ ![](images/5aeff45634a0e0cdf610ff376b548b4912cfff4afa8324b395edef386c9959ac.jpg)
419
+
420
+ # B PROOF OF THE MAIN RESULTS
421
+
422
+ # B.1 PROOF OF LEMMA 1
423
+
424
+ Lemma 1. For any compact domain $\mathcal{K} \subset \mathbb{R}^{d_x}$ and any finite set of activation functions $\{\sigma_i\}$ , the $\{\sigma_i\}$ networks with width $w < w_{\min}^* \equiv \max(d_x, d_y)$ do not have the UAP for both $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ and $C(\mathcal{K}, \mathbb{R}^{d_y})$ .
425
+
426
+ Proof. It is enough to show the following two counterexamples $f^{*}(x)$ that cannot be approximated in the $L^p$ -norm.
427
+
428
+ 1) $f^{*}(x) = \| x\|^{2}, x \in \mathcal{K} = [-2,2]^{d_{x}}$ , cannot be approximated by any networks with width not larger than $d_{x} - 1$ . In fact, we can relax the networks to a function $f(x) = \phi (Wx + b)$ , where $Wx + b$ is an affine transformation from $\mathbb{R}^{d_x}$ to $\mathbb{R}^{d_x - 1}$ and $\phi (x)$ could be any function. A consequence is that there exists a direction $v$ (set as the vector satisfying $Wv = 0$ , $\| v\| = 1$ ) such that $f(x) = f(x + \lambda v)$ for all $\lambda \in \mathbb{R}$ . Then, considering the sets $A = \{x:\| x\| \leq 0.1\}$ and $B = \{x:\| x - v\| \leq 0.1\}$ , we have
429
+
430
+ $$
431
+ \begin{array}{l} \int_{\mathcal{K}} |f(x) - f^{*}(x)|\,dx \geq \int_{A} |f(x) - f^{*}(x)|\,dx + \int_{B} |f(x) - f^{*}(x)|\,dx \\ \geq \int_{A} \big( |f(x) - f^{*}(x)| + |f(x + v) - f^{*}(x + v)| \big)\,dx \\ \geq \int_{A} |f^{*}(x) - f^{*}(x + v)|\,dx \geq 0.8\,|A|. \end{array}
432
+ $$
433
+
434
+ Since the volume of $A$ is a fixed positive number, the inequality implies that even the $L^1$ approximation for $f^*$ is impossible. The case of the $L^p$ norm and the uniform norm is impossible as well.
435
+
436
+ 2) The function $f^{*}$ , the parametrized curve from 0 to 1 along the edge of the cubic, cannot be approximated by any networks with width not larger than $d_{y} - 1$ . Relaxing the networks to a function $f(x) = W\psi (x) + b$ , $\psi (x)$ could be any function. Since the range of $f$ is in a hyperplane while $f^{*}$ has a positive distance to any hyperplane, the target $f^{*}$ cannot be approximated.
437
+
438
+ # B.2 PROOF OF THEOREM 2
439
+
440
+ Theorem 2. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $L^p(\mathcal{K},\mathbb{R}^{d_y})$ , the minimum width of leaky-ReLU networks having $L^p$ -UAP is exactly $w_{\min} = \max(d_x,d_y,2)$ .
441
+
442
+ Proof. Using Lemma 1, we only need to prove two points: 1) the $L^p$ -UAP holds when $\max(d_x, d_y) \geq 2$ , 2) when $d_x = d_y = 1$ , there is a function that cannot be approximated by leaky-ReLU networks with width one (since width two is enough for the $L^p$ -UAP).
443
+
444
+ The first point is a consequence of Corollary 11 since we can extend the target function to dimension $d = \max(d_x, d_y)$ .
445
+
446
+ The second point is obvious since leaky-ReLU networks with a width of one are monotone functions that cannot approximate nonmonotone functions such as $f^{*}(x) = x^{2}, x \in [-1, 1]$ .
447
+
448
+ # B.3 PROOF OF THEOREM 3
449
+
450
+ Theorem 3. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $L^p(\mathcal{K}, \mathbb{R}^{d_y})$ , the minimum width of leaky-ReLU+ABS networks having $L^p$ -UAP is exactly $w_{\min} = \max(d_x, d_y)$ .
451
+
452
+ Proof. This is a consequence of Theorem 2 (for the case of $\max(d_x, d_y) \geq 2$ ) combined with Lemma 8 (for the case of $d_x = d_y = 1$ ).
453
+
454
+ # B.4 PROOF OF LEMMA 4
455
+
456
+ Lemma 4. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the function class $C(\mathcal{K},\mathbb{R}^{d_y})$ , the minimum width of $\text{ReLU} + \text{FLOOR}$ networks having $C$ -UAP is exactly $w_{\min} = \max(d_x,2,d_y)$ .
457
+
458
+ Proof. Recalling the results of Lemma 1, we only need to prove two points: 1) the C-UAP holds when $\max(d_x, d_y) \geq 2$ , 2) when $d_x = d_y = 1$ , there is a function that cannot be approximated by ReLU+FLOOR networks with width one (since width two is enough for the $C$ -UAP).
459
+
460
+ The first step can be constructed by the encoder-memorizer-decoder approach. The second point is obvious since $\mathrm{ReLU} + \mathrm{FLOOR}$ networks with width one are monotone functions that cannot approximate nonmonotone functions such as $f^{*}(x) = x^{2}, x \in [-1, 1]$ .
461
+
462
+ # B.5 PROOF OF THEOREM 5
463
+
464
+ Theorem 5. The UOE networks with width $d_y$ have $C$ -UAP for functions in $C([0,1],\mathbb{R}^{d_y})$ .
465
+
466
+ Proof. Since functions in $C([0,1],\mathbb{R}^{d_y})$ can be regarded as $d_{y}$ one-dimensional functions, it is enough to prove the case of $d_{y} = 1$ , which is the result in Lemma 8. $\square$
467
+
468
+ # B.6 PROOF OF COROLLARY 6
469
+
470
+ Corollary 6. Let $\mathcal{K} \subset \mathbb{R}^{d_x}$ be a compact set; then, for the continuous function class $C(\mathcal{K},\mathbb{R}^{d_y})$ , the minimum width of $UOE + FLOOR$ networks having $C$ -UAP is exactly $w_{\min} = \max(d_x,d_y)$ .
471
+
472
+ Proof. The case where $\max(d_x, d_y) \geq 2$ is a consequence of Lemma 4 since the UOE function contains the leaky-ReLU as a part. The case where $\max(d_x, d_y) = 1$ , i.e. $d_x = d_y = 1$ , is a consequence of Lemma 8.
achievetheminimumwidthofneuralnetworksforuniversalapproximation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbc2a2136c0a4588fa53596966943125bc3f85ad335c9894f8c433a08249327a
3
+ size 347557
achievetheminimumwidthofneuralnetworksforuniversalapproximation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:032cd093cee3f3ae6046a24c0c45b5c22476386c4782718172898da61122f954
3
+ size 818738
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0f14960756b9d5706804cc6a15f15e19abf670c532682169059072371a2d283
3
+ size 162276
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ade155a4b296ba147ecc12eb699cb5a08cedfef63de77022f4eaa61e762c8d1f
3
+ size 191680
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/874fb7b1-e176-4d85-8046-a22c9733e770_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d88faae6167650d49f676bc688baf93586149630a3332386f3bfc8b9e182990
3
+ size 1980552
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/full.md ADDED
@@ -0,0 +1,726 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ACHIEVING NEAR-OPTIMAL INDIVIDUAL REGRET & LOW COMMUNICATIONS IN MULTI-AGENT BANDITS
2
+
3
+ Xuchuang Wang
4
+
5
+ Department of Computer Science and Engineering
6
+ The Chinese University of Hong Kong
7
+ xuchuangw@gmail.com
8
+
9
+ Lin Yang
10
+
11
+ School of Intelligence Science and Technology
12
+ Nanjing University
13
+ linyang@nju.edu.cn
14
+
15
+ Yu-zhen Janice Chen
16
+
17
+ College of Information and Computer Sciences
18
+ University of Massachusetts Amherst
19
+ yuzhenchen@cs.umass.edu
20
+
21
+ Xutong Liu
22
+
23
+ Department of Computer Science and Engineering The Chinese University of Hong Kong liuxt@cse.cuhk.edu.hk
24
+
25
+ Mohammad Hajiesmaili & Don Towsley
26
+
27
+ College of Information and Computer Sciences
28
+ University of Massachusetts Amherst
29
+ {hajiesmaili, towsley}@cs.umass.edu
30
+
31
+ John C.S. Lui
32
+
33
+ Department of Computer Science and Engineering The Chinese University of Hong Kong cslui@cse.cuhk.edu.hk
34
+
35
+ # ABSTRACT
36
+
37
+ Cooperative multi-agent multi-armed bandits (CMA2B) study how distributed agents cooperatively play the same multi-armed bandit game. Most existing CMA2B works focused on maximizing the group performance of all agents—the accumulation of all agents' individual performance (i.e., individual reward). However, in many applications, the performance of the system is more sensitive to the "bad" agent—the agent with the worst individual performance. For example, in a drone swarm, a "bad" agent may crash into other drones and severely degrade the system performance. In that case, the key of the learning algorithm design is to coordinate computational and communicational resources among agents so to optimize the individual learning performance of the "bad" agent. In CMA2B, maximizing the group performance is equivalent to minimizing the group regret of all agents, and maximizing the individual performance can be measured by minimizing the maximum (worst) individual regret among agents. Minimizing the maximum individual regret was largely ignored in prior literature, and currently, there is little work on how to minimize this objective with a low communication overhead. In this paper, we propose a near-optimal algorithm on both individual and group regrets, in addition, we also propose a novel communication module in the algorithm, which only needs $O(\log (\log T))$ communication times where $T$ is the number of decision rounds. We also conduct simulations to illustrate the advantage of our algorithm by comparing it to other known baselines.
38
+
39
+ # 1 INTRODUCTION
40
+
41
+ The stochastic multi-armed bandit problem is a classic sequential decision making problem. Given $K$ arms, there is one agent who repeatedly chooses one arm to pull and observes a stochastic reward from the pulled arm in each time slot. To maximize cumulative reward (or minimize regret which is the cumulative reward difference between the optimal decision and agent's choices), the agent needs to pull an arm either with a large empirical mean reward to greedily maximize reward (exploitation), or whose reward estimate is highly uncertain so as to reduce that uncertainty to discover good arms (exploration). To model many real life applications, e.g., cognitive radio with multiple users (Liu & Zhao, 2010; Jouini et al., 2010; Boursier & Perchet, 2019), clinical trials in multiple labs (Wang, 1991), recommendation systems with multiple servers (Agarwal et al., 2008; Li et al., 2010; Landgren et al., 2016), cooperative source search by multiple robots (Li et al., 2014; Jin et al., 2017), etc., one
42
+
43
+ Table 1: A comparison summary of prior literature and this work (all regret bounds are problem-dependent and we omit the $1 / \Delta_{2}$ factor.)
44
+
45
+ <table><tr><td></td><td>Individual regret</td><td>Group regret</td><td>Communication time</td></tr><tr><td>DPE2 (Wang et al., 2020a)</td><td>O(K log T)</td><td>O(K log T)</td><td>O(K2M2)</td></tr><tr><td>ComEx (Madhushani &amp; Leonard, 2021)</td><td>O(K log T)</td><td>O(K log T)</td><td>O(KM log T)</td></tr><tr><td>GosInE (Chawla et al., 2020)</td><td>O((K/M + 2) log T)</td><td>O((K + 2M) log T)</td><td>Ω(log T)</td></tr><tr><td>Dec_UCB (Zhu et al., 2021a)</td><td>O((K/M) log T)</td><td>O(K log T)</td><td>O(MT)</td></tr><tr><td>UCB-TCOM (our algorithm)</td><td>O((K/M) log T)</td><td>O(K log T)</td><td>O(KM log(log T))</td></tr></table>
46
+
47
+ needs to extend the model to allow for more than one agent ( $M > 1$ ) playing the same multi-armed bandit game. These agents cooperate with each other to minimize their regrets. We call this problem the cooperative multi-agent multi-armed bandits (CMA2B) problem and present it formally in §2.
48
+
49
+ The most common objective of CMA2B is to minimize the aggregate regret among all $M$ agents, dubbed as group regret in this paper. This objective has been studied in the majority of prior work (Boursier & Perchet, 2019; Chawla et al., 2020; Huang et al., 2021; Shi et al., 2021b; Wang et al., 2020a;b). In addition to group regret, individual performance among agents is another important metric that is less studied in prior work on CMA2B. The performance of each individual agent is critical in many applications in distributed systems. For example, in many distributed resource allocation scenarios with different agents in charge of the allocation, overall performance depends on the performance of the bottleneck agent instead of the aggregate performance of all agents. This can also be seen in a computer network scenario, in which an ISP may apply learning-based algorithms (Ma et al., 2010; Jiang et al., 2018) for some networking problems such as shortest path routing, channel selection, etc. To ensure that the users are served fairly, the underlying algorithms should fairly provide approximately equivalent individual performance for each learning agent. This is equivalent to minimizing the bottleneck agent's individual regret. For another thing, in network optimization literature, the max-min fairness metric—maximize the minimal individual reward—is widely used to measure a system's fairness (Srikant & Ying, 2013, §2.21), such as fair queuing (Demers et al., 1989). Since the regret is the opposite of reward, optimizing the max-min fairness is also equivalent to minimizing the bottleneck agent's regret. Other fairness motivation examples can be found in political philosophy (Rawls, 2004). In this paper, we explicitly take into account the notion of minimizing the maximum individual regret and, for brevity, hereinafter, refer to it as the individual regret.
50
+
51
+ Another important metric in CMA2B is the communication time of all agents. For some distributed systems, e.g., agents are geographically located, communications among agents can be expensive. Thus it is important to design a cooperative learning algorithm that provides minimal group and individual regrets, while at the same time, incurs a small communication cost. In addition, it will be desirable to have a learning algorithm in which one can tune parameters so as to trade off communication times with regret as needed by different applications.
52
+
53
+ Contributions. In §3, we present the UCB-TCOM algorithm that achieves not only a near-optimal group regret of $O((K / \Delta_2) \log T)$ but also a near-optimal problem-dependent individual regret of $O((K / M\Delta_2) \log T)$ with only $O(\log (\log T))$ communication times, where $\Delta_2$ is the smallest reward mean gap between arms and $T$ is the number of rounds. This is the first near-optimal algorithm on individual regret with efficient communications: Previous low communication algorithms, e.g., the leader-follower algorithm (Wang et al., 2020b), cannot achieve the near-optimal individual regret; and previous near-optimal algorithms on individual regret, e.g., GosInE (Chawla et al., 2020), required high communication times (see related works below). UCB-TCOM achieves the near-optimal individual regret performance by evenly dividing the group regret to all agents. To equalize the regrets of all agents, UCB-TCOM directs agents to synchronously pull arms: agents only utilize the common reward observations, i.e., those having been broadcast over all agents, to make decisions. The communication policy TCOM (Tunable COMmunication) of UCB-TCOM is a parametric meta-algorithm that governs the communication of agents and can be executed on top of any underlying bandit learning algorithm. A salient feature of TCOM is that it can be tuned to balance regret and communication times. In particular, two parameters in TCOM can be tuned to determine the aggressiveness and frequency of communications among agents. Our analysis explicitly demonstrates how communication times can be tuned from 0 to $O(T)$ . Finally, we report numerical results in §5.
54
+
55
+ Related works. The most relevant prior literature on CMA2B is summarized in Table 1. First of all, notice all algorithms in Table 1 attain a similar $O(K \log T)$ regret upper bound. So, we compare them from the perspective of individual regret and communication cost. Previous algorithms exhibit good individual regret bounds, i.e., Dec_UCB (Zhu et al., 2021a) and GosInE (Chawla et al., 2020). However, the communication times of Dec_UCB are $O(T)$ and that of GosInE are at least $\Omega(\log T)$ . It is worth noting that the leader-follower algorithm (DPE2) (Wang et al., 2020a), where an agent, i.e., the leader, takes responsibility for exploration, and the other agents, i.e., followers, only exploit the arms recommended by the leader incurs constant $O(1)$ communication cost. However, the leader-follower approach inherently fails to achieve good individual regret—the leader agent incurs almost all of the regret $O(K \log T)$ while followers only incur constant regrets. Lastly, we mention the communication policy ComEx (Madhushani & Leonard, 2021) designed for fully distributed algorithms achieves a near-optimal group regret of $O(K \log T)$ with a communication cost of $O(\log T)$ . However, ComEx does not guarantee optimal individual regret. We provide a comprehensive discussion of related work in Appendix A.
56
+
57
+ # 2 MODEL
58
+
59
+ System Model. Consider a multi-armed bandit (MAB) game with $K \in \mathbb{N}_{+}$ arms. Each arm $k \in [K]^{1}$ is associated with a $(1/2)$ -sub-gaussian reward distribution with unknown mean $\mu(k)$ , e.g., it can be any distribution whose support is $[0,1]$ . Assume there are $M \in \mathbb{N}_{+}$ distributed agents playing this bandit game in $T \in \mathbb{N}_{+}$ rounds. In time slot $t \in [T]$ , each agent $i \in [M]$ pulls an arm $A_{t}^{(i)} \in [K]$ and receives a reward $X_{t}^{(i)}(A_{t}^{(i)})$ drawn from the reward distribution of arm $A_{t}^{(i)}$ . When more than one agent pulls the same arm, each of them gets an independent reward drawn from the arm's distribution. Without loss of generality, assume the $K$ arms are ordered in descending order of their mean rewards, i.e., $\mu(1) > \mu(2) \geqslant \ldots \geqslant \mu(K)$ . Denote the reward mean gap as $\Delta_{k} := \mu(1) - \mu(k)$ and assume $\Delta_{2} > 0$ , i.e., arm 1 is the unique optimal arm.
60
+
61
+ Group regret. This paper uses group regret—the cumulative difference between the optimal policy rewards and an algorithm's rewards—as the algorithm's performance metric. Each agent's optimal policy is to pull arm 1 in all $T$ rounds. So, under any cooperative algorithm $\mathcal{A}$ , the expected group regret of all $M$ agents is defined as follows,
62
+
63
+ $$
64
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} (\mathcal {A}) \right] := M T \mu (1) - \mathbb {E} \left[ \sum_ {i \in [ M ]} \sum_ {t \in [ T ]} X _ {t} ^ {(i)} \left(A _ {t} ^ {(i)}\right) \right], \tag {1}
65
+ $$
66
+
67
+ where the expectation is taken over the randomness of stochastic rewards and algorithm's (agents') decisions.
68
+
69
+ Maximum individual regret. While the group regret in (1) characterizes overall system performance, individual performance of each agent is important as well, and, among all individual regrets of agents, the maximum one is usually more important. For example, in a drone swarm, the failure/misbehavior of a single drone, e.g., it crashes into other drones, can dramatically degrade the whole system's overall performance; in network measurement, the slowest inference engine determines how fast the network parameters, e.g., traffic flows and channel bandwidths, are learned. In the above systems, one can define the individual regret objective as follows,
70
+
71
+ $$
72
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\mathrm {i n d}} (\mathcal {A}) \right] := T \mu (1) - \mathbb {E} \left[ \min _ {i \in [ M ]} \sum_ {t \in [ T ]} X _ {t} ^ {(i)} \left(A _ {t} ^ {(i)}\right) \right]. \tag {2}
73
+ $$
74
+
75
+ Communication times. Agents in the system cooperate with each other by communicating their reward observations or reward averages. Any message (if communicated) is broadcast to all agents and received within a single time slot. A broadcast message includes an arm index, the average arm reward for all observations since the previous broadcast, and the number of observations. The total number of messages communicated among these $M$ agents quantifies the communication times of an algorithm. We count each broadcast as one message<sup>2</sup>. Hence, the communication times under
76
+
77
+ algorithm $\mathcal{A}$ are defined as follows,
78
+
79
+ $$
80
+ \mathbb{E}\left[\mathrm{C}_{\mathrm{T}}(\mathcal{A})\right] := \mathbb{E}\left[\sum_{i \in [M]} \sum_{t \in [T]} \mathbb{1}\left\{\text{agent } i \text{ broadcasts one message in time slot } t\right\}\right].
81
+ $$
82
+
83
+ Model extensions. For ease of presentation of the core ideas, we focus on a simple model formulation where agents reside on a complete graph (i.e., one clique) and incur no communication delays. In Appendix G, we extend the basic model and communication policy TCOM to account for arbitrary communication topology and deterministic communication delays. These demonstrate the generality of our core algorithmic ideas.
84
+
85
+ # 3 ALGORITHM DESIGN
86
+
87
+ In this section, we devise a cooperative learning algorithm, UCB-TCOM, which attains the near-optimal results of not only group regret but maximum individual regret as well with only $O(\log (\log T))$ communication times. The core of UCB-TCOM is a communication strategy TCOM that can run on top of diverse bandit learning algorithms in multi-agent systems. Specifically, TCOM determines how an agent broadcasts its reward observations to other agents, while the underlying bandit learning algorithm, e.g., UCB, Thompson sampling, elimination, determines how each agent pulls arms. In §3.1, we present TCOM as the communication module of our algorithm design. Then, in §3.2, we show how to integrate TCOM with UCB as the underlying bandit algorithm to obtain UCB-TCOM.
88
+
89
+ Recall that in CMA2B, the near-optimal group regret $\Theta((K / \Delta_2) \log T)$ (Wang et al., 2020a, §1.2) is the summation of individual regrets of all agents. Therefore, minimizing the maximum individual regret is equivalent to equalizing individual regrets of all agents. Uniformly dividing the group regret to each individual agent can be achieved by a symmetric learning structure where agents pull same arms in each time slot. However, in current state-of-the-art algorithms, e.g., the leader-follower algorithm DPE2 (Wang et al., 2020a), agents usually take different roles—some explore and other exploit—and thus those algorithms are suboptimal on the individual regret objective. On the other hand, some straightforward symmetric learning algorithms, e.g., cooperative UCB (Yang et al., 2022, §III.B) where agents communicate and take the same action in every time slot, require high communication times; most are $O(T)$ . Instead, UCB-TCOM proposes a novel communication policy TCOM to maintain a symmetric learning structure with low communication times. In particular, to maintain the symmetric learning structure, agents only utilize global reward observations. Here "global" observations refer to those observations that have already been broadcast to all agents, excluding those recent reward observations that have not been shared (i.e., only local to one agent).
90
+
91
+ # 3.1 TCOM: A TUNABLE COMMUNICATION POLICY
92
+
93
+ A key idea of TCOM is based on the observation that the benefit of cooperation in a multi-agent setting comes primarily from sharing information regarding suboptimal arms instead of the optimal arm. Specifically, an agent can avoid pulling a suboptimal arm when she receives external observations from others, while sharing observations from the optimal arm may not necessarily reduce individual agent regret, e.g., sharing information about an optimal arm while other agents still need to exploit the optimal arm will not decrease regret but merely increase communication times. So, instead of paying the cost for communicating the optimal arm observations, an agent can let other agents explore the optimal arm by themselves. Hence, agents can reduce overall communication overhead by refraining from communicating the optimal arm's rewards while still guaranteeing near-optimal regret. To implement the above idea, each agent constructs a communication arm set from which TCOM dynamically excludes the arms that are likely to be optimal. Then, each agent only shares observations about potentially suboptimal arms in the communication arm set. We explain how to construct the communication arm set in §3.1.1.
94
+
95
+ The communication arm set technique in the previous paragraph reduces communication times to $O(\log T)$ . To further reduce communication times, another key idea of TCOM is to aggregate an arm's multiple observations and send their average at one time. Naively applying a phase-based
96
+
97
+ communication protocol, e.g., the one in (Wang et al., 2020b), to aggregate arm observations does not automatically further decrease communication times to $O(\log (\log T))$ . Because the phase-based protocol was used to achieve $O(\log T)$ communication times which is already reached via the communication arm set technique. Instead, one needs to break the pre-assigned rigid phased-based communications, i.e., observations of all arms are communicated together at the end of each phase, and allow each arm—depending on how many times it lies inside the communication arm set—to have its own specific and dynamic phases for communications. This idea is implemented by a carefully designed observation-buffering technique. The novel technique can boost the above communication arm set technique and further reduce communication times to $O(\log (\log T))$ . We formalize this idea in §3.1.2.
98
+
99
+ # 3.1.1 CONSTRUCTION OF THE COMMUNICATION ARM SET
100
+
101
+ In each time slot, each agent determines its communication arm set by comparing its estimated confidence intervals of the reward means. Let $\hat{n}_t(k)$ denote the number of global reward observations of arm $k$ up to time slot $t$ , that is, the number of observations of arm $k$ that have been broadcast across the whole CMA2B system. Let $\hat{\mu}_t(k)$ denote the mean reward estimate of these $\hat{n}_t(k)$ observations. Then, given the empirical mean $\hat{\mu}_t(k)$ and the number of observations $\hat{n}_t(k)$ , we construct a tunable confidence interval for the true reward mean $\mu(k)$ by Hoeffding's inequality (Hoeffding, 1994). The confidence interval centered at $\hat{\mu}_t(k)$ has a width $\mathrm{CI}_t(k,\alpha)$ expressed as
102
+
103
+ $$
104
+ \mathrm {C I} _ {t} (k, \alpha) := \alpha \sqrt {\log t / \hat {n} _ {t} (k)},
105
+ $$
106
+
107
+ where $\alpha \in (-\infty, \infty)$ is a tunable parameter. The reward mean $\mu(k)$ lies inside the confidence interval $(\hat{\mu}_t(k) - \mathrm{CI}_t(k,\alpha), \hat{\mu}_t(k) + \mathrm{CI}_t(k,\alpha))$ with probability of at least $1 - 2t^{-2\alpha^2}$ (see Lemma 1 in Appendix B.2). We define the tunable upper and lower confidence bounds of $\hat{\mu}_t(k)$ as
108
+
109
+ $$
110
+ \mathrm {t U C B} _ {t} (k, \alpha) := \hat {\mu} _ {t} (k) + \mathrm {C I} _ {t} (k, \alpha), \quad \mathrm {t L C B} _ {t} (k, \alpha) := \hat {\mu} _ {t} (k) - \mathrm {C I} _ {t} (k, \alpha).
111
+ $$
112
+
113
+ For any arm $k$ , if there exists another arm $k'$ whose upper confidence bound $\mathsf{tUCB}_t(k', \alpha)$ is greater than arm $k$ 's lower confidence bound $\mathsf{tLCB}_t(k, \alpha)$ , then arm $k$ 's mean reward estimate is not significantly greater than those of others. In this case, arm $k$ is potentially identified as suboptimal, and its new observation (if any) should be broadcast to others. Formally, given tuning parameter $\alpha$ , communication arm set $\mathcal{C}_t(\alpha)$ of agent $i$ at time $t$ contains all arms identified as suboptimal, i.e.,
114
+
115
+ $$
116
+ \mathcal{C}_{t}(\alpha) := \left\{k \in [K]: \exists k^{\prime} \in [K] \backslash \{k\} \text{ such that } \mathrm{tUCB}_{t}\left(k^{\prime}, \alpha\right) > \mathrm{tLCB}_{t}(k, \alpha)\right\}. \tag{3}
117
+ $$
118
+
119
+ Remark 1 (Comparison to the candidate arm set of elimination algorithms). At the first glance, the communication arm set in (3) is similar to the active arm elimination (AAE) policy's candidate arm set (Auer et al., 2002; Even-Dar et al., 2006; Yang et al., 2022). However, the two sets have intrinsically different usages: TCOM constructs the communication arm set to determine how to broadcast observations, while AAE uses its candidate set to determine how to pull arms.
120
+
121
+ Remark 2 (Comparison to ComEx (Madhushani & Leonard, 2020; 2021)). The intuition we utilize in this subsection—communicating the observations of suboptimal arms is more useful than that of the optimal arm—has also been observed in Madhushani & Leonard (2020; 2021). Compared to ComEx, the algorithmic novelties of TCOM are (1) applying confidence intervals to more accurately identify the suboptimal arms; (2) combining the observation-buffering broadcast with the communication arm set technique; (3) including tunable parameters with more flexibility. The detailed algorithm design comparisons are deferred to Appendix A.1. Furthermore, as we mentioned in Table 1, ComEx fails to achieve the near-optimal individual regret and its communication times $O(\log T)$ are not as good as our $O(\log (\log T))$ result.
122
+
123
+ # 3.1.2 OBSERVATION-BUFFERING BROADCAST
124
+
125
+ While the straightforward immediate broadcast observation sharing method can produce a near-optimal group regret, its $O(\log T)$ communication times are not efficient. Prior literature (Desautels et al., 2014; Gao et al., 2019) has shown that the near-optimal regret in stochastic bandit games is preserved in the presence of a constant delay before receiving reward observations (Desautels et al., 2014), and the regret upper bound does not deteriorate too much when delays increase according to a geometric sequence (Gao et al., 2019). These results suggest that it is not necessary to immediately communicate the latest observations. Instead, to reduce communication times, the
126
+
127
+ Algorithm 1 The UCB-TCOM Algorithm (for each agent)
128
+ 1: Input: the communication arm set parameter $\alpha$ and buffering ratio $\beta$
129
+ 2: Initialization: $\hat{n}_t(k) = 0, N_t(k) = 0, \hat{\mu}_t(k) = 0, \tau_t(k) = 0$
130
+ 3: for each decision round $t$ do Both for-loops (Lines 3 and 15) run in parallel.
131
+ 4: Pull arm $A_t$ with the highest global UCB
132
+ 5: Observe arm $A_t$'s reward $X_t(A_t)$
133
+ 6: if $A_t \in \mathcal{C}_t(\alpha)$ then
134
+ 7: Increase $N_t(A_t)$ by 1
135
+ 8: Update this phase's empirical mean $\tilde{\mu}_t(A_t)$
136
+ 9: end if
137
+ 10: if $N_t(A_t) \geqslant \lceil \beta N_{\tau_t(A_t)}(A_t) \rceil$ then
138
+ 11: Broadcast the message ( $\tilde{\mu}_t(A_t), N_t(A_t), A_t$ )
139
+ 12: $\tau_t(A_t) \gets t$
140
+ 13: end if
141
+ 14: end for
142
+ 15: for each newly received message ( $\tilde{\mu}_t(k), N_t(k), k$ ) from the past decision round do
143
+ 16: Update the empirical mean $\hat{\mu}_t(k) \gets \frac{\hat{\mu}_t(k)\hat{n}_t(k) + \tilde{\mu}_t(k)\lfloor N_t(k)(1 - 1/\beta)\rfloor}{\hat{n}_t(k) + \lfloor N_t(k)(1 - 1/\beta)\rfloor}$
144
+ 17: Increase $\hat{n}_t(k)$ by $\lfloor N_t(k)(1 - 1/\beta)\rfloor$
145
+ 18: Update the communication arm set $\mathcal{C}_t(\alpha)$ via (3) based on tunable confidence bounds
146
+ 19: end for
147
+
148
+ policy can aggregate multiple reward observations from an arm and communicate their sample mean in one message. Combining this mechanism with the designed communication arm set in (3) leads to the communication policy TCOM.
149
+
150
+ Next, we present how the buffering mechanism controls observation aggregation. Denote by $N_{t}(k)$ the number of times that an agent pulls arm $k$ when the arm lies inside the communication arm set, i.e., $k \in \mathcal{C}_t(\alpha)$ , up to time $t$ . Therefore, $N_{t}(k)$ is the number of messages agent $i$ would broadcast, each containing a single arm $k$ observation, if one employs immediate broadcast. Instead of immediate broadcast, we buffer observations and then broadcast their average when the number of observations (since previous broadcast) increases by a ratio. For example, if the ratio is 2, the agent sends arm $k$ 's reward average of the arm's new observations since last broadcast whenever the counter $N_{t}(k)$ equals 2, 4, 8, 16, ..., etc, or power of 2. More generally, let $\beta (>1)$ denote a ratio parameter that controls how the buffering size of reward observations is increased. When counter $N_{t}(k)$ increases to $\lceil \beta N_{\tau}(k) \rceil$ since the last time agent $i$ broadcast arm $k$ 's average reward, i.e., $N_{t}(k) \geqslant \lceil \beta N_{\tau}(k) \rceil$ , then agent $i$ broadcasts a message including the arm index $k$ , the counter $N_{t}(k)$ , and the sample mean $\tilde{\mu}_{t}(k)$ , of this arm's latest $\lfloor N_{t}(k)(1 - 1 / \beta) \rfloor$ observations to other agents. Due to the space limit, the detailed discussion of TCOM's tunability on parameters $\alpha$ and $\beta$ is deferred to Appendix D.1.
151
+
152
+ # 3.2 UCB-TCOM: APPLICATION OF TCOM ON THE UCB ALGORITHM
153
+
154
+ In this section, we demonstrate how to leverage the communication policy TCOM to develop a communication-efficient extension of cooperative UCB. We call the proposed algorithm UCB-TCOM.
155
+
156
+ In slot $t$ , agent $i$ chooses the arm with the largest global UCB index, i.e., $A_{t} = \arg \max_{k}\mathrm{UCB}_{t}(k)$ , where $\mathrm{UCB}_t(k)\coloneqq \hat{\mu}_t(k) + \sqrt{2\log t / \hat{n}_t(k)}$ , and observes the arm's reward $X_{t}^{(i)}(A_{t})$ . Note the UCB indexes and action $A_{t}$ are the same for all agents; thus we omit their superscript $(i)$ . After pulling arm $A_{t}$ , the agent checks whether the arm is in its communication arm set $\mathcal{C}_t$ (Line 6); if yes, counter $N_{t}(A_{t})$ is increased by 1, and the buffered reward sample mean is updated (Lines 7-8). When the counter $N_{t}(A_{t})$ is increased by a factor $\beta$ since the last time slot that agent $i$ communicated arm $A_{t}$ 's sample mean (Line 10), the agent sends out arm $A_{t}$ 's sample mean to other agents (Line 11). Meanwhile, if the agent receives messages from other agents, it updates its estimates accordingly (Lines 16- 18). The details are presented in Algorithm 1.
157
+
158
+ # 4 THEORETICAL RESULTS
159
+
160
+ In this section, we analyze the regret and communication times of UCB-TCOM (Algorithm 1).
161
+
162
+ # 4.1 RESULT OVERVIEW
163
+
164
+ We present near-optimal group and individual regret upper bounds of UCB-TCOM in Theorem 1.
165
+
166
+ Theorem 1 (Regret upper bounds of UCB-TCOM for $\alpha > 1$ ). When the communication arm set parameter $\alpha > 1$<sup>4</sup> and buffering-ratio $\beta > 1$ , UCB-TCOM (Algorithm 1) attains a near-optimal group regret upper bound in terms of number of decision rounds $T$ , arms $K$ , and agents $M$ , or formally,
167
+
168
+ $$
169
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}, \tag {4}
170
+ $$
171
+
172
+ and UCB-TCOM (Algorithm 1) also attains a near-optimal individual regret upper bound, or formally,
173
+
174
+ $$
175
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\text {i n d}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{M \Delta_ {k}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}. \tag {5}
176
+ $$
177
+
178
+ Theorem 1's group regret proof is based on the observation that most suboptimal arm observations are broadcast when $\alpha > 1$ (see Theorem 2(iii)), and the fact that under the observation buffering mechanism the number of observations of suboptimal arms increases by at most a ratio of $\beta$ between broadcasts. Theorem 1's individual regret proof is based on the fact that all $M$ agents are symmetric—they always pull the same arms. The proof of Theorem 1 is given in Appendix B.3.
179
+
180
+ In Theorem 2, we present the communication times of UCB-TCOM. It shows how parameter $\alpha$ influences the communication of optimal and suboptimal arms observations.
181
+
182
+ Theorem 2. The communication times of UCB-TCOM have the following properties:
183
+
184
+ (i) When $\alpha \leqslant -\sqrt{2}$ , no communication occurs among agents.
185
+ (ii) When $-\sqrt{2} < \alpha < \sqrt{2}$ and $\beta > 1$ , the number of broadcasts of observations of the optimal arm by one agent is $O(\log (\log T))$ . More rigorously, it is less than
186
+
187
+ $$
188
+ \log_ {\beta} \left(\left(\frac {\sqrt {2} + \alpha}{\sqrt {2} - \alpha}\right) ^ {2} \left(\frac {8 \log T}{\Delta_ {2} ^ {2}} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}\right)\right). \tag {6}
189
+ $$
190
+
191
+ (iii) When $\alpha > 1$ , almost all observations of suboptimal arms—except for a finite number independent of $T$ —are broadcast.
192
+ (iv) When $\alpha \geqslant \frac{2\sqrt{2}\mu(1)}{\Delta_2}$ , almost all observations of the optimal arm—except for a finite number that is independent of $T$ —are broadcast.
193
+
194
+ The proof of Theorem 2(i) follows from the fact that when $\alpha < -\sqrt{2}$ , the pulled arm's tLCB index is greater than those tUCB indexes of all other arms, which violates the communication arm set's condition in (3). Hence, the pulled arm never lies inside the communication arm set, and thus no communication occurs. The proof of Theorem 2(ii) is based on the geometric growth of the time interval between broadcasts and a key observation: the number of broadcasts of the optimal arm's observations is upper bounded by some suboptimal arm $k$ 's total number of pulls, that is, $\hat{n}_t^{(i)}(1) < ((\sqrt{2} + \alpha) / (\sqrt{2} - \alpha))^2 \hat{n}_t^{(i)}(k)$ . Theorems 2(iii) and 2(iv) are derived by excluding some well-designed small probability events and from the fact that the arm mean rewards lie inside their tunable confidence intervals with high probability. The proof of Theorem 2 is given in Appendix B.4.
195
+
196
+ Remark 3. Theorem 2's proof is not a simple extension of the analysis of previous algorithms. First, TCOM relies on a group of tunable confidence intervals to identify suboptimal arms and communicate new observations, while ComEx is only based on arm empirical reward means, a special case of tunable confidence intervals. Second, the observation-buffering broadcast incurs additional delays in global observation sharing, which is a unique challenge in TCOM. We defer the detailed explanation of both challenges to Appendix B.1.
197
+
198
+ # 4.2 DISCUSSIONS
199
+
200
+ (a) Regret optimality of UCB-TCOM. The following asymptotic lower bound has been established in the classic bandits literature (e.g., Lai et al. (1985)):
201
+
202
+ $$
203
+ \liminf_{T \rightarrow \infty} \frac{\mathbb{E}[\mathrm{R}_{\mathrm{T}}(\mathcal{A})]}{\log T} \geqslant \sum_{k > 1} \frac{\Delta_{k}}{\operatorname{KL}\left(\nu_{k}, \nu_{1}\right)}, \tag{7}
204
+ $$
205
+
206
+ where $\nu_{k}$ is the reward distribution of arm $k$ and KL is the Kullback-Leibler divergence between two probability distributions. When the reward distributions are Bernoulli or Gaussian, the regret lower bound can be written as $\Omega (\sum_{k > 1}(\log T / \Delta_k))$ (Lattimore & Szepesvári, 2020, §16). Since the CMA2B model has the same objective as the centralized (single agent) MAB model, it inherits this group regret lower bound (Wang et al., 2020a, §1.2) under any possible communication policies. Given this lower bound, Theorem 1's group regret upper bound in (4) is near-optimal, i.e., it is tight up to a constant factor.
207
+
208
+ Recall that the individual regret in (2) corresponds to the maximal individual regret among $M$ agents. Since the maximal individual regret is minimized when all agents uniformly share the regret, from the above group regret lower bound, one can obtain the following individual regret lower bound,
209
+
210
+ $$
211
+ \liminf_{T\to \infty}\frac{\mathbb{E}[\mathrm{R}_{\mathrm{T}}^{\mathrm{ind}}(\mathcal{A})]}{\log T}\geqslant \frac{1}{M}\sum_{k > 1}\frac{\Delta_{k}}{\mathrm{KL}(\nu_{k},\nu_{1})}.
212
+ $$
213
+
214
+ Compared to this lower bound, Theorem 1's individual regret upper bound in (5) is also near-optimal.
215
+
216
+ (b) Impact of cooperation and number of agents $M$ on group regret. If there is no cooperation among the $M$ agents—each agent individually plays the game, then the group regret is lower bounded as follows:
217
+
218
+ $$
219
+ \liminf_{T \to \infty} \frac{\mathbb{E}[\mathrm{R}_{\mathrm{T}}(\mathcal{A})]}{\log T} \geqslant M \sum_{k > 1} \frac{\Delta_{k}}{\mathrm{KL}(\nu_{k}, \nu_{1})},
220
+ $$
221
+
222
+ which is $M$ times worse than the group regret achieved by UCB-TCOM in Theorem 1. This highlights the benefits of cooperation. Furthermore, when the number of rounds $T$ is large, the second term containing an $M$ factor in the group regret (4) is negligible. That is, the group regret of UCB-TCOM does not increase rapidly when the number of agents $M$ increases, while it would without cooperation.
223
+
224
+ (c) Individual regret comparison to the leader-follower algorithm. Equation (5) shows that the individual regret of UCB-TCOM decreases as the number of agents $M$ increases. This is in contrast to DPE2 (Wang et al., 2020a) based on the leader-follower paradigm, which attributes all exploration costs to a single leader, and thus DPE2's individual regret is $M$ times worse than that of UCB-TCOM. In many large-scale distributed systems, e.g., edge computing, database, etc., computational resources are often distributed geographically. The largest individual computational time that these distributed nodes (agents) need to pay can be mapped to the individual regret. From this perspective, UCB-TCOM with the near-optimal individual regret is preferable to DPE2 especially when $M$ is large.
225
+
226
+ (d) Communication Times of UCB-TCOM. Theorem 2(ii) shows that when $-\sqrt{2} < \alpha < \sqrt{2}$ , the number of broadcasts of the optimal arm's observations is $O(\log (\log T))$ . Additionally, the observation buffering mechanism also aggregates the suboptimal arms' $O(\log T)$ observations into $O(\log (\log T))$ broadcasts. So, in this case, UCB-TCOM only incurs $O(\log (\log T))$ communication times. Combining the above with Theorem 1 shows that when $1 < \alpha < \sqrt{2}$ and $\beta > 1$ , UCB-TCOM enjoys the near-optimal group and individual regret upper bounds with only $O(\log (\log T))$ communication times.
227
+
228
+ We also discuss how parameters $\alpha$ and $\beta$ of TCOM influence cooperation among the agents and how they impact communication times and regret in Appendix D.
229
+
230
+ # 5 NUMERICAL SIMULATIONS
231
+
232
+ Experiment setup. Unless otherwise stated, the experiments consist of $M = 25$ agents and $K = 20$ arms, communication set parameter $\alpha = 1.2$ , buffering ratio $\beta = 2$ , and $T = 30,000$ . Each arm is
233
+
234
+ ![](images/4eb62514fbbcbc59449231a9d42fd7de7a18cb0b7fc8ddba0c0b33f0724400e5.jpg)
235
+
236
+ ![](images/9c03c59b77b98b48271726ab757baec59ebf95eab1c64a6a5d10ad38203708b6.jpg)
237
+ (a) Group regret
238
+
239
+ ![](images/ecc6c8c692dc17633accddd5ddf4b8e80efe36b49266efac56ff11ce65cf63cd.jpg)
240
+ (b) Individual regret
241
+
242
+ ![](images/502a33b7e3bad2db294e59382087ffb08b1fba8bf54613e8ef75dc86518fc71b.jpg)
243
+ (c) Communications
244
+ Figure 1: UCB-TCOM vs. Dec_UCB, GosInE, DPE2, ComEx and COUCB
245
+
246
+ associated with a Bernoulli reward random variable whose mean is uniformly randomly taken from Ad-Clicks (Avito, 2015). All results are averaged over 50 trials and their standard deviations are plotted as shaded regions.
247
+
248
+ Comparison to state-of-the-art baselines. We consider five baselines: Dec_UCB (Zhu et al., 2021a), GosInE (Chawla et al., 2020), DPE2 (Wang et al., 2020a), ComEx (Madhushani & Leonard, 2021), and COUCB. The first four are discussed in Table 1. To compare all algorithms fairly, we implement them using the same undirected complete communication graph where agents can broadcast their observations to all other agents. The communication times of GosInE are set to be as small as the algorithm allows. COUCB is a naive adaptation of UCB in CMA2B, where each agent runs an instance of UCB and always immediately broadcasts its new reward observations to other agents.
249
+
250
+ Comparison results are reported in Figure 1. Both group and individual regrets are reported in Figures 1a and 1b as a function of time. UCB-TCOM performs only slightly worse than the best algorithm (DPE2 in group regrets, COUCB in individual regrets). Figure 1c compares the communication times of these algorithms in the log scale. It corroborates the fact that UCB-TCOM's $O(\log (\log T))$ communication times lie between ComEx and GosInE's $O(\log T)$ and DPE2's $O(1)$ performance.
251
+
252
+ We also provide additional simulations in Appendices D, E and F.
253
+
254
+ # 6 CONCLUSION
255
+
256
+ In this paper, we proposed a communication policy TCOM which, combined with diverse single-agent bandit learning algorithms, can be used to devise fully distributed cooperative multi-agent bandit learning algorithms. We specifically study the UCB-TCOM algorithm where agents pull arms via UCB and communicate via TCOM. We show that UCB-TCOM can attain both near-optimal group and individual regrets while only incurring $O(\log (\log T))$ communication costs. Extensive simulations are also reported to support our results.
257
+
258
+ We note that TCOM can also be tuned to balance the regret and communications. One interesting future work is to systemically study the Pareto frontier of group/individual regrets vs. communication times from the perspective of the upper bounds of TCOM and the intrinsic lower bounds (difficulties) of CMA2B as well.
259
+
260
+ # 7 ACKNOWLEDGEMENTS
261
+
262
+ The work of Mohammad Hajiesmaili is supported by NSF CAREER-2045641, CPS-2136199, CNS-2106299, and CNS-2102963. The work of Don Towsley is funded in part by US Army contract W911NF-17-2-0196. The work of John C.S. Lui is supported in part by the RGC's GRF 14215722. Lin Yang is the corresponding author.
263
+
264
+ # REFERENCES
265
+
266
+ Deepak Agarwal, Bee-Chung Chen, Pradheep Elango, Nitin Motgi, Seung-Taek Park, Raghu Ramakrishnan, Scott Roy, and Joe Zachariah. Online models for content optimization. Advances in Neural Information Processing Systems, 21, 2008.
267
+ Shipra Agrawal and Navin Goyal. Analysis of thompson sampling for the multi-armed bandit problem. In Conference on learning theory, pp. 39-1. JMLR Workshop and Conference Proceedings, 2012.
268
+ Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. Finite-time analysis of the multiarmed bandit problem. Machine learning, 47(2):235-256, 2002.
269
+ Avito. Avito Context Ad Clicks, 2015. https://www.kaggle.com/c/avito-context-ad-clicks.
270
+ Yogev Bar-On and Yishay Mansour. Individual regret in cooperative nonstochastic multi-armed bandits. Advances in Neural Information Processing Systems, 32, 2019.
271
+ Etienne Boursier and Vianney Perchet. Sic-mmab: synchronisation involves communication in multiplayer multi-armed bandits. Advances in Neural Information Processing Systems, 32, 2019.
272
+ Sébastien Bubeck. Bandits games and clustering foundations. PhD thesis, Université des Sciences et Technologie de Lille-Lille I, 2010.
273
+ Swapna Buccapatnam, Jian Tan, and Li Zhang. Information sharing in distributed stochastic bandits. In 2015 IEEE Conference on Computer Communications (INFOCOM), pp. 2605-2613. IEEE, 2015.
274
+ Ronshee Chawla, Abishek Sankararaman, Ayalvadi Ganesh, and Sanjay Shakkottai. The gossiping insert-eliminate algorithm for multi-agent bandits. In International Conference on Artificial Intelligence and Statistics, pp. 3471-3481. PMLR, 2020.
275
+ Alan Demers, Srinivasan Keshav, and Scott Shenker. Analysis and simulation of a fair queueing algorithm. ACM SIGCOMM Computer Communication Review, 19(4):1-12, 1989.
276
+ Thomas Desautels, Andreas Krause, and Joel W Burdick. Parallelizing exploration-exploitation tradeoffs in gaussian process bandit optimization. Journal of Machine Learning Research, 15: 3873-3923, 2014.
277
+ Abhimanyu Dubey et al. Cooperative multi-agent bandits with heavy tails. In International Conference on Machine Learning, pp. 2730-2739. PMLR, 2020.
278
+ Eyal Even-Dar, Shie Mannor, Yishay Mansour, and Sridhar Mahadevan. Action elimination and stopping conditions for the multi-armed bandit and reinforcement learning problems. Journal of machine learning research, 7(6), 2006.
279
+ Raphaël Féraud, Réda Alami, and Romain Laroche. Decentralized exploration in multi-armed bandits. In International Conference on Machine Learning, pp. 1901-1909. PMLR, 2019.
280
+ Zijun Gao, Yanjun Han, Zhimei Ren, and Zhengqing Zhou. Batched multi-armed bandits problem. Advances in Neural Information Processing Systems, 32, 2019.
281
+ Osama A Hanna, Lin F Yang, and Christina Fragouli. Solving multi-arm bandit using a few bits of communication. arXiv preprint arXiv:2111.06067, 2021.
282
+ Eshcar Hillel, Zohar S Karnin, Tomer Koren, Ronny Lempel, and Oren Somekh. Distributed exploration in multi-armed bandits. Advances in Neural Information Processing Systems, 26, 2013.
283
+ Wassily Hoeffding. Probability inequalities for sums of bounded random variables. In The collected works of Wassily Hoeffding, pp. 409-426. Springer, 1994.
284
+ Ruiquan Huang, Weiqiang Wu, Jing Yang, and Cong Shen. Federated linear contextual bandits. Advances in Neural Information Processing Systems, 34, 2021.
285
+
286
+ Jingyan Jiang, Liang Hu, Pingting Hao, Rui Sun, Jiejun Hu, and Hongtu Li. Q-fdba: improving qoe fairness for video streaming. Multimedia Tools and Applications, 77(9):10787-10806, 2018.
287
+ Long Jin, Shuai Li, Lin Xiao, Rongbo Lu, and Bolin Liao. Cooperative motion generation in a distributed network of redundant robot manipulators with noises. IEEE Transactions on Systems, Man, and Cybernetics: Systems, 48(10):1715-1724, 2017.
288
+ Wassim Jouini, Damien Ernst, Christophe Moy, and Jacques Palicot. Upper confidence bound based decision making strategies and dynamic spectrum access. In 2010 IEEE International Conference on Communications, pp. 1-5. IEEE, 2010.
289
+ Nikolai Karpov, Qin Zhang, and Yuan Zhou. Collaborative top distribution identifications with limited interaction. In 2020 IEEE 61st Annual Symposium on Foundations of Computer Science (FOCS), pp. 160-171. IEEE, 2020.
290
+ Ravi Kumar Kolla, Krishna Jagannathan, and Aditya Gopalan. Collaborative learning of stochastic bandits over a social network. IEEE/ACM Transactions on Networking, 26(4):1782-1795, 2018.
291
+ Tze Leung Lai, Herbert Robbins, et al. Asymptotically efficient adaptive allocation rules. Advances in applied mathematics, 6(1):4-22, 1985.
292
+ Peter Landgren, Vaibhav Srivastava, and Naomi Ehrich Leonard. Distributed cooperative decision-making in multiarmed bandits: Frequentist and bayesian algorithms. In 2016 IEEE 55th Conference on Decision and Control (CDC), pp. 167-172. IEEE, 2016.
293
+ Tor Lattimore and Csaba Szepesvári. Bandit algorithms. Cambridge University Press, 2020.
294
+ Lihong Li, Wei Chu, John Langford, and Robert E Schapire. A contextual-bandit approach to personalized news article recommendation. In Proceedings of the 19th international conference on World wide web, pp. 661-670, 2010.
295
+ Shuai Li, Ruofan Kong, and Yi Guo. Cooperative distributed source seeking by multiple robots: Algorithms and experiments. IEEE/ASME Transactions on mechatronics, 19(6):1810-1820, 2014.
296
+ Keqin Liu and Qing Zhao. Distributed learning in multi-armed bandit with multiple players. IEEE transactions on signal processing, 58(11):5667-5681, 2010.
297
+ Richard TB Ma, Dah Ming Chiu, John CS Lui, Vishal Misra, and Dan Rubenstein. Internet economics: The use of shapley value for isp settlement. IEEE/ACM Transactions on networking, 18(3):775-787, 2010.
298
+ Udari Madhushani and Naomi Leonard. It doesn't get better and here's why: A fundamental drawback in natural extensions of ucb to multi-agent bandits. In "I Can't Believe It's Not Better!" NeurIPS 2020 workshop, 2020.
299
+ Udari Madhushani and Naomi Leonard. When to call your neighbor? strategic communication in cooperative stochastic bandits. arXiv preprint arXiv:2110.04396, 2021.
300
+ David Martínez-Rubio, Varun Kanade, and Patrick Rebeschini. Decentralized cooperative stochastic bandits. Advances in Neural Information Processing Systems, 32, 2019.
301
+ Abbas Mehrabian, Etienne Boursier, Emilie Kaufmann, and Vianney Perchet. A practical algorithm for multiplayer bandits when arm means vary among players. In International Conference on Artificial Intelligence and Statistics, pp. 1211-1221. PMLR, 2020.
302
+ John Rawls. A theory of justice. In Ethics, pp. 229-234. Routledge, 2004.
303
+ Chengshuai Shi and Cong Shen. Federated multi-armed bandits. In Proceedings of the 35th AAAI Conference on Artificial Intelligence (AAAI), 2021.
304
+ Chengshuai Shi, Cong Shen, and Jing Yang. Federated multi-armed bandits with personalization. In International Conference on Artificial Intelligence and Statistics, pp. 2917–2925. PMLR, 2021a.
305
+
306
+ Chengshuai Shi, Wei Xiong, Cong Shen, and Jing Yang. Heterogeneous multi-player multi-armed bandits: Closing the gap and generalization. Advances in Neural Information Processing Systems, 34, 2021b.
307
+ Rayadurgam Srikant and Lei Ying. Communication networks: an optimization, control, and stochastic networks perspective. Cambridge University Press, 2013.
308
+ Balázs Szörényi, Róbert Busa-Fekete, István Hegedűs, Róbert Ormándi, Márk Jelasity, and Balázs Kégl. Gossip-based distributed stochastic bandit algorithms. In International Conference on Machine Learning, pp. 19-27. PMLR, 2013.
309
+ Chao Tao, Qin Zhang, and Yuan Zhou. Collaborative learning with limited interaction: Tight bounds for distributed exploration in multi-armed bandits. In 2019 IEEE 60th Annual Symposium on Foundations of Computer Science (FOCS), pp. 126–146. IEEE, 2019.
310
+ Po-An Wang, Alexandre Proutiere, Kaito Ariu, Yassir Jedra, and Alessio Russo. Optimal algorithms for multiplayer multi-armed bandits. In International Conference on Artificial Intelligence and Statistics, pp. 4120–4129. PMLR, 2020a.
311
+ You-Gan Wang. Sequential allocation in clinical trials. Communications in Statistics-Theory and Methods, 20(3):791-805, 1991.
312
+ Yuanhao Wang, Jiachen Hu, Xiaoyu Chen, and Liwei Wang. Distributed bandit learning: Near-optimal regret with efficient communication. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020, 2020b.
313
+ Lin Yang, Yu-zhen Janice Chen, Mohammad Hajiesmaili, John Lui, and Don Towsley. Distributed bandits with heterogeneous agents. arXiv preprint arXiv:2201.09353, 2022.
314
+ Jingxuan Zhu and Ji Liu. A distributed algorithm for multi-armed bandit with homogeneous rewards over directed graphs. In 2021 American Control Conference (ACC), pp. 3038-3043. IEEE, 2021.
315
+ Jingxuan Zhu, Romeil Sandhu, and Ji Liu. A distributed algorithm for sequential decision making in multi-armed bandit with homogeneous rewards. In 2020 59th IEEE Conference on Decision and Control (CDC), pp. 3078-3083. IEEE, 2020.
316
+ Jingxuan Zhu, Ethan Mulle, Christopher Salomon Smith, and Ji Liu. Decentralized multi-armed bandit can outperform classic upper confidence bound. arXiv preprint arXiv:2111.10933, 2021a.
317
+ Zhaowei Zhu, Jingxuan Zhu, Ji Liu, and Yang Liu. Federated bandit: A gossiping approach. In Abstract Proceedings of the 2021 ACM SIGMETRICS/International Conference on Measurement and Modeling of Computer Systems, pp. 3-4, 2021b.
318
+
319
+ # A RELATED WORKS
320
+
321
+ CMA2B with a central server or leader. Many distributed bandits models (Wang et al., 2020b; a; Shi & Shen, 2021; Shi et al., 2021a; Mehrabian et al., 2020; Shi et al., 2021b) assume the existence of a central server or elect a leader among agents. We choose two mostly related works to discuss. Wang et al. (2020b) consider the CMA2B model with a central server. The server collects information from each agent and then sends its aggregated information back to each agent. Specifically, Wang et al. (2020b) propose a phased-based elimination algorithm that achieves the near-optimal problem-independent regret bound $O(\sqrt{KMT\log(KM)})$ with finite number of communication times (independent of the number of rounds $T$ ). However, when it comes to near-optimal problem-dependent regret upper bound which is the focus of this paper, their algorithm suffers logarithmic communication times while our algorithm only incurs $O(\log (\log T))$ communications. Wang et al. (2020a) propose the DPE2 algorithm for CMA2B based on a leader-follower paradigm: The leader takes the responsibility of exploring while other agents (followers) only exploit arms recommended by the leader. Although it achieves the optimal group regret with finite number of communications, the DPE2 algorithm is inherently unfair — one single agent, the leader, incurs almost all regret $O(K\log T)$ while other followers only incur finite regrets $O(1)$ . We also note that algorithms of previous works heavily rely on a central server (or an elected leader) and thus are vulnerable to attacks on the central server (or a leader), while fully distributed algorithms that this paper studies can tolerate such attacks.
322
+
323
+ CMA2B without a central server. The fully decentralized CMA2B model has been studied in Buccapatnam et al. (2015); Landgren et al. (2016); Kolla et al. (2018); Martínez-Rubio et al. (2019), where they mainly focus on minimizing regrets and seldom quantify their communication times. Some of these algorithms assume that agents can gossip with each other, e.g., via a graph represented by the gossiping matrix (Martínez-Rubio et al., 2019; Chawla et al., 2020), or via peer-to-peer protocols (Szorenyi et al., 2013). In their gossip setting, agents can only communicate with their neighbors. This is fairly different from our all-agent broadcast communication (in Appendix G.2, we relax the all-agent broadcast in TCOM to communication with neighbors in the graph). Recently, Madhushani & Leonard (2021) proposed a communication protocol, ComEx, which aims to limit the communications of optimal arm's reward observations. Our tunable communication policy TCOM subsumes ComEx as a special case (see Remark 1). Also, ComEx incurs $O(\log T)$ communication times while our algorithm only needs $O(\log (\log T))$ to attain the near-optimal regret upper bound.
324
+
325
+ Beyond the above related work, there are other works on distributed bandits such as federated bandits (Shi & Shen, 2021; Shi et al., 2021a; Zhu et al., 2021b; Huang et al., 2021), distributed bandits with collision setting (Boursier & Perchet, 2019; Mehrabian et al., 2020; Wang et al., 2020b; Shi et al., 2021b), and cooperative pure exploration (Hillel et al., 2013; Tao et al., 2019; Karpov et al., 2020). The federated bandits (Shi & Shen, 2021; Shi et al., 2021a; Zhu et al., 2021b; Huang et al., 2021) consider heterogeneous local reward distributions, which is different from our homogeneous reward environment. In distributed bandits with collision setting model (Boursier & Perchet, 2019; Mehrabian et al., 2020; Wang et al., 2020b; Shi et al., 2021b), when more than one agent chooses the same arm, each of these agents gets a zero reward, while in our model, each of these agents obtains an independent reward. Cooperative pure exploration (best arm identification) (Hillel et al., 2013; Tao et al., 2019; Féraud et al., 2019; Karpov et al., 2020) study how multi-agent cooperation can reduce the sample complexity of identifying the best arm which is a different objective from our work on regret minimization. Some of their high-level ideas in communication are similar to TCOM, e.g., Hillel et al. (2013) utilize the doubling phase for communication and Féraud et al. (2019)'s algorithm communicates bad arms. However, as we discussed in Appendix B.1, our algorithm is novel in specific design detail, and thus introduces unique challenges that cannot be addressed by the techniques of these known works.
326
+
327
+ Individual regret in CMA2B With the majority of previous CMA2B works studying the group regret objective, the individual regrets were also studied sparingly, e.g., in stochastic CMA2B (Zhu et al., 2021a; Zhu & Liu, 2021; Zhu et al., 2020), in adversarial CMA2B (Bar-On & Mansour, 2019), and in federated bandits (Zhu et al., 2021b). Among them, Zhu et al. (2021a) was the most related to ours, where they devised algorithms that achieved the near-optimal individual regret with $O(T)$ communication times, while our algorithms, with near-optimal individual regrets as well, only needs $O(\log (\log T))$ communication times.
328
+
329
+ # A.1 DETAIL COMPARISON TO COMEX
330
+
331
+ TCOM shares some high-level ideas from known algorithms. However, when it comes to the algorithm design details, TCOM needs to resolve unique challenges to improve the state-of-the-art result. There are three algorithmic ideas in the design of TCOM which enables improving the state-of-the-art result in the literature. In the following, we outline each idea separately and clarify its novelty as compared to the most relevant prior work.
332
+
333
+ 1. On how to identify suboptimal arms: The most related work with a similar high-level algorithmic idea is ComEx (Madhushani & Leonard, 2021), where arms with suboptimal empirical reward means are identified as suboptimal in communication. In TCOM, however, we utilize a group of tunable confidence intervals, which, as compared to the empirical means in ComEx, provides a clearer separation between optimal and suboptimal arms. Further, we emphasize that although using confidence intervals in bandits is a classic technique for minimizing regret, TCOM instead uses confidence intervals to decide whether an arm's observation should be shared or not. It is worth noting that the dynamic construction of the communication arm set emphasizes the need for different proof techniques than those in ComEx, which is elaborated in Appendix B.1.
334
+ 2. On how to buffer observations: Observation buffering (a.k.a. phase-based communication) has been used in previous works, e.g., Shi et al. (2021b), where applying this technique can achieve $O(\log T)$ communication times. However, TCOM has its own technique for the construction of communication arm sets (relevant to the first algorithmic idea above), which already achieves $O(\log T)$ communication times without using batched communication. Therefore, simply applying the batched communication technique does not automatically improve the communication times of TCOM. Instead, we design an arm-specific observation-buffering mechanism which allows each arm—depending on whether it lies in the communication arm set—to have its own specific and dynamic phases for communication. This carefully designed observation-buffering technique can boost TCOM's first communication arm set technique and further reduce the communication times to $O(\log (\log T))$ .
335
+ 3. On algorithm design with a tunable parameter: Last, unique to our algorithm design, we introduced two tunable parameters $\alpha$ and $\beta$ that determine how to add arms into the dynamic communication arm set based on their confidence intervals. This addition provides greater flexibility to TCOM, which is explained in details in Appendix D.
336
+
337
+ # B PROOFS IN ANALYSIS (SECTION 4)
338
+
339
+ # B.1 HIGHLIGHT OF ANALYSIS CHALLENGES
340
+
341
+ TCOM utilizes two techniques: communication arm set construction (§3.1.1) and observation-buffering broadcast (§3.1.2). Each technique alone can reduce the communication times to $O(\log T)$ , and TCOM combines both to further reduce the communication times to $O(\log (\log T))$ . This $O(\log (\log T))$ result comes directly from the TCOM policy design. The major challenge in analysis is to establish that—with these reduced and delayed sharing observations due to TCOM—the UCB-TCOM algorithm still preserves the near-optimal group and individual regrets. To do so, we show (1) the communication arm set construction technique can prevent communicating most optimal arm observations while allow communicating most suboptimal arm observations; (2) the observation sharing delay due to the observation-buffering broadcast technique (upon the communication arm set technique) does not have an intrinsic impact on UCB-TCOM's group and individual regret performance.
342
+
343
+ (1) Although the communication arm set construction technique shares the high level idea of ComEx (Madhushani & Leonard, 2021), the analysis of ComEx is not applicable to TCOM. Because the empirical means utilized for identifying the suboptimal arms in ComEx is only a special case of the tunable confidence intervals that TCOM relies on. Therefore, to show that agents can selectively share suboptimal arms' observations by the communication arm set technique, we prove two new results: (a) the optimal arm's observations are often not broadcast (i.e., the optimal arm is often not in the communication arm set, Theorem 2(ii)); (b) suboptimal arm observations are almost
344
+
345
+ always broadcast (i.e., the suboptimal arms are often in the set, Theorem 2(iii)). The (a) is proved via showing that the number of times that the optimal arm is in the communication arm set is no greater than that of a suboptimal arm (multiplied by a constant factor); thus is upper bounded by $O(\log T)$ . This is based on revealing a relation between the bandit arm pull policy and the communication arm set construction. Then (b) is proved via showing whenever an agent pulls a suboptimal arm, the arm is inside the communication arm set with a high probability, and thus the suboptimal arm's observations are almost always buffered for later communication.
346
+
347
+ Our observation-buffering broadcast mechanism shares a high-level idea of doubling phase technique, but its specific design is different from known ones, and, consequently, its analysis addresses novel challenges. The doubling phase technique was utilized in CMA2B literature, e.g., Boursier & Perchet (2019); Wang et al. (2020b); Shi et al. (2021b), where their arm pull policies within a phase were either uniformly pulling each arm (Boursier & Perchet, 2019; Wang et al., 2020a) or sticking to several arms (Shi et al., 2021b). However, different from these previous "rigid" arm pull policies, UCB-TCOM applies the "flexible" UCB arm pull policy. In order to adapt the doubling phase technique to the UCB policy, we propose the observation-buffering broadcast mechanism which separately buffers the observation of each arms and respectively communicates each arm's compound observations whenever this arm's observation times is doubled (or generally, increased by a $\beta$ factor).
348
+
349
+ (2) Since this observation-buffering broadcast is different from known doubling phase algorithms, their existing analysis approaches are not applicable to observation-buffering. Hence, proving that this broadcast mechanism does not intrinsically deteriorate the near-optimal group and individual regrets of UCB-TCOM is a unique challenge. More specifically, we prove that the delay due to buffering only makes regrets worse by a constant factor. This is via showing that the observation-buffering mechanism separately buffers each arm's observations, and, therefore, the observation delay of each arm's observations (as well as the pulling times of this arm during the delay) can be respectively upper bounded by at most a constant factor multiplying this arm's total previous pulling times (see (9) of Theorem 1's proof as a formal expression).
350
+
351
+ # B.2 PRELIMINARIES
352
+
353
+ In this subsection, we provide several basic results and definitions that are crucial in the later analysis.
354
+
355
+ Definition 1 (Type-I and Type-II decisions). An agent makes a Type-I decision in time slot $t$ if in this time slot all arms' true reward means $\mu(k)$ lie inside this agent's tunable confidence intervals $(\mathsf{tLCB}_t(k, \alpha), \mathsf{tUCB}_t(k, \alpha))$ respectively. Otherwise, the agent makes a Type-II decision.
356
+
357
+ Lemma 1. For any arm $k \in [K]$ , any agent $i \in [M]$ , and time slot $t \in [T]$ , the probability that the arm $k$ 's true reward mean $\mu(k)$ lies inside its tunable confidence interval $(\mathsf{tLCB}_t(k, \alpha), \mathsf{tUCB}_t(k, \alpha))$ (calculated by agent $i$ ) is no less than $1 - 2t^{1 - 2\alpha^2}$ .
358
+
359
+ Proof of Lemma 1. We first bound the probability that the true reward mean is no less than the tunable upper confidence bound tUCB.
360
+
361
+ $$
362
+ \begin{array}{l} \mathbb {P} \left(\mu (k) \geqslant \mathrm {t U C B} _ {t} (k, \alpha)\right) = \mathbb {P} \left(\mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}}\right) \\ = \sum_ {s = 1} ^ {t} \mathbb {P} \left(\mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}} \Bigg | \hat {n} _ {t} (k) = s\right) \mathbb {P} (\hat {n} _ {t} (k) = s) \\ \leqslant \sum_ {s = 1} ^ {t} \mathbb {P} \left(\mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}} \mid \hat {n} _ {t} (k) = s\right) \\ \stackrel {(a)} {\leqslant} \sum_ {s = 1} ^ {t} t ^ {- 2 \alpha^ {2}} \\ \leqslant t ^ {1 - 2 \alpha^ {2}}, \\ \end{array}
363
+ $$
364
+
365
+ where the inequality (a) is by the Hoeffding's inequality and that the random variable $\mu(k) - \hat{\mu}_t(k)$ is $\sqrt{1/4\hat{n}_t(k)}$ -subgaussian. With a similar derivation, the probability that the true reward mean $\mu(k)$
366
+
367
+ is no greater than the tunable lower confidence bound $\mathsf{tLCB}$ , i.e., $\mu (k)\leqslant \mathsf{tLCB}_t(k,\alpha)$ , can also be upper bounded by $t^{1 - 2\alpha^2}$ . Therefore, by excluding the above two events, the probability that the true reward mean lies inside the tunable confidence interval is no less than $1 - 2t^{1 - 2\alpha^2}$ .
368
+
369
+ Lemma 2 (Bound the number of times of Type-II decisions). When $\alpha > 1$ , the expected total number of times of Type-II decisions by any agent at any time slot in the system is finite. Specifically, it is less than $K \frac{2\alpha^2 - 1}{\alpha^2 - 1}$ .
370
+
371
+ Proof of Lemma 2. A Type-II decision at time slot $t$ means that there exists at least one arm whose true reward mean lies outside its tunable confidence interval. We bound the expected total number of times of Type-II decisions as follows,
372
+
373
+ $$
374
+ \begin{array}{l} \sum_ {t = 1} ^ {T} \mathbb {P} \left(\exists k \in [ K ] \text { such that } \mu (k) \notin \left(\mathrm {tLCB} _ {t} (k, \alpha), \mathrm {tUCB} _ {t} (k, \alpha)\right)\right) \\ \leqslant \sum_ {t = 1} ^ {T} \sum_ {k = 1} ^ {K} \mathbb {P} \left(\mu (k) \notin \left(\mathrm {tLCB} _ {t} (k, \alpha), \mathrm {tUCB} _ {t} (k, \alpha)\right)\right) \\ \leqslant K \sum_ {t = 1} ^ {T} \mathbb {P} \left(\mu (k) \notin \left(\mathrm {tLCB} _ {t} (k, \alpha), \mathrm {tUCB} _ {t} (k, \alpha)\right)\right) \\ \stackrel {(a)} {\leqslant} 2 K \sum_ {t = 1} ^ {T} t ^ {1 - 2 \alpha^ {2}} \\ \leqslant 2 K \left(1 + \int_ {t = 1} ^ {T} t ^ {1 - 2 \alpha^ {2}} d t\right) \\ \stackrel {(b)} {\leqslant} K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}, \\ \end{array}
375
+ $$
376
+
377
+ where the inequality (a) holds for Lemma 1, and the inequality (b) holds because $\alpha > 1$ (i.e., $1 - 2\alpha^2 < -1$ ).
378
+
379
+ Next, we state a standard property of UCB algorithms and provide its proof for completeness.
380
+
381
+ Lemma 3 (Adapted from (Yang et al., 2022, Lemma 2)). If at any time $t \leqslant T$ agent $i \in [M]$ by UCB makes a Type-I decision and pulls arm $k$ , the total number of globally observed rewards of the arm $k$ by agent $i$ up to time $t$ is upper bounded as follows,
382
+
383
+ $$
384
+ \hat {n} _ {t} (k) \leqslant \frac {8 \log t}{\Delta_ {k} ^ {2}}.
385
+ $$
386
+
387
+ Proof of Lemma 3. Given that the suboptimal arm $k$ is pulled by agent $i$ , we have the following inequality
388
+
389
+ $$
390
+ 2 \times \sqrt {\frac {2 \log t}{\hat {n} _ {t} (k)}} \geqslant \Delta_ {k}. \tag {8}
391
+ $$
392
+
393
+ Otherwise, we have
394
+
395
+ $$
396
+ \begin{array}{l} \mathrm {U C B} _ {t} (1) = \hat {\mu} _ {t} (1) + \sqrt {\frac {2 \log t}{\hat {n} _ {t} (1)}} \\ \stackrel {(a)} {\geqslant} \mu (1) = \mu (k) + \Delta_ {k} \\ \stackrel {(b)} {>} \mu (k) + 2 \sqrt {\frac {2 \log t}{\hat {n} _ {t} (k)}} \\ \stackrel {(c)} {>} \hat {\mu} _ {t} (k) + \sqrt {\frac {2 \log t}{\hat {n} _ {t} (k)}} \\ = \mathrm {U C B} _ {t} (k), \\ \end{array}
397
+ $$
398
+
399
+ which contradicts the pulling of arm $k$ . Note that inequalities (a) and (c) are from Type-I decision's definition that reward means lie inside confidence interval, and inequality (b) is due to the inverse of (8). Therefore, (8) holds and rearranging (8) concludes the proof.
400
+
401
+ # B.3 PROOFS OF GROUP REGRET (THEOREM 1)
402
+
403
+ Proof of Theorem 1. To bound the regret, one needs to bound the number of times of pulling suboptimal arms by all $M$ agents in the system. Let $k$ be the index of a suboptimal arm. In UCB-TCOM, the reward observations of pulling arm $k$ are only broadcast (together as reward means) whenever the number of arm $k$ 's local observations has increased by a factor of $\beta$ . So, there are delays before receiving other agents' observations. These delays may cause each agent to pull arm $k$ an additional number of times. Next, we show that such delays do not significantly increase arm $k$ 's overall pulling times in the system.
404
+
405
+ Note that the communication set contains all suboptimal arms when agents make Type-I decisions, in which case all suboptimal arm pulls increase the counter $N_{t}(k)$ by 1. Adding the other cost in Type-II decisions (at most $K\frac{2\alpha^{2} - 1}{\alpha^{2} - 1}$ by Lemma 2 and $\frac{2\alpha^2 - 1}{\alpha^2 - 1}$ for each arm), the total pulling times of a suboptimal arm $k$ among all $M$ agents are upper bounded by $\sum_{i\in [M]}N_t(k) + M\frac{2\alpha^2 - 1}{\alpha^2 - 1}$ .
406
+
407
+ Recall that $\tau_t(k)$ denotes the last time slot (before time slot $t$ ) when agent $i$ broadcasts arm $k$ 's buffered observations' average reward. To bound the first term in RHS, we have
408
+
409
+ $$
410
+ \sum_ {i \in [ M ]} N _ {t} (k) \stackrel {(a)} {\leqslant} \beta \sum_ {i \in [ M ]} N _ {\tau_ {t} (k)} (k) \stackrel {(b)} {\leqslant} \frac {8 \beta \log T}{\Delta_ {k} ^ {2}}, \tag {9}
411
+ $$
412
+
413
+ where inequality (a) is because the counter $N_{t}(k)$ is at most $\beta$ times greater than its value at the last broadcast time slot, and inequality (b) is because $\sum_{i\in [M]}N_{\tau_t(k)}(k)$ is total global observations of arm $k$ at time slot $t$ which is less than $8\Delta_k^{-2}\log T$ by Lemma 3.
414
+
415
+ At last, we sum up the regret cost of pulling suboptimal arms in Type-I decisions and the other cost in Type-II decisions (in total at most $MK\frac{2\alpha^2 - 1}{\alpha^2 - 1}$ by Lemma 2 given $\alpha > 1$ ). So, the group regret is upper bounded as follows
416
+
417
+ $$
418
+ \mathbb {E} [ \mathsf {R} _ {\mathrm {T}} (\mathcal {A}) ] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}.
419
+ $$
420
+
421
+ Notice that when executing Algorithm 1, all agents have the same arm pull behavior. So, the group regret is evenly divided among the agents' individual regrets, and thus the individual regret is upper bounded as follows,
422
+
423
+ $$
424
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\mathrm {i n d}} (\mathcal {A}) \right] \leqslant \frac {1}{M} \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}.
425
+ $$
426
+
427
+ ![](images/6bba72204f3f00ff1ab7706e3454ef3e4cffd80d1c0c549ade6301e482fb0272.jpg)
428
+
429
+ # B.4 PROOFS OF COMMUNICATION COSTS (THEOREM 2)
430
+
431
+ Proof of Theorem 2(i). No occurrence of communication for $\alpha \leqslant -\sqrt{2}$ is because that the pulled arm $A_{t}$ is never inside the communication set $\mathcal{C}_t(\alpha)$ in (3). When $\alpha \leqslant -\sqrt{2}$ , an arm's tLCB is greater than its original UCB index, that is, tLCB $_t(k,\alpha) = \hat{\mu}_t(k) - \mathrm{CI}_t(k,\alpha) \geqslant \hat{\mu}_t(k) + \sqrt{2\log t / \hat{n}_t(k)} = \mathrm{UCB}_t(k)$ ; and its tUCB is smaller than its UCB index, that is, tUCB $_t(k,\alpha) = \hat{\mu}_t(k) + \mathrm{CI}_t(k,\alpha) \leqslant \hat{\mu}_t(k) + \sqrt{2\log t / \hat{n}_t(k)} = \mathrm{UCB}_t(k)$ . Then, for the pulled arm $A_{t}$ , the inverse of the communication set's condition holds:
432
+
433
+ $$
434
+ \begin{array}{l} \mathrm {t L C B} _ {t} \left(A _ {t}, \alpha\right) \geqslant \mathrm {U C B} _ {t} \left(A _ {t}\right) \\ \stackrel {(a)} {\geqslant} \operatorname {U C B} _ {t} (k), \forall k \neq A _ {t} \\ \geqslant \mathsf {t U C B} _ {t} (k, \alpha), \forall k \neq A _ {t}, \\ \end{array}
435
+ $$
436
+
437
+ where the inequality $(a)$ is because that the pulled arm $A_{t}$ has the highest UCB index in time slot $t$ .
438
+
439
+ Proof of Theorem 2(ii). If a reward observation of optimal arm is broadcast by agent $i$ at time slot $t$ , then there exists at least one suboptimal arm $k$ such that the following two events hold at the same time:
440
+
441
+ $$
442
+ \mathrm {U C B} _ {t} (1) \geqslant \mathrm {U C B} _ {t} (k)
443
+ $$
444
+
445
+ $$
446
+ \mathrm {tLCB} _ {t} (1, \alpha) < \mathrm {tUCB} _ {t} (k, \alpha),
447
+ $$
448
+
449
+ where the first event is from the UCB arm selection policy, and the second event holds because the pulled arm 1 belongs to the communication set. These two events imply:
450
+
451
+ $$
452
+ \begin{array}{l} \left\{ \begin{array}{l} \hat {\mu} _ {t} (1) + \sqrt {2 \log t / \hat {n} _ {t} (1)} \geqslant \hat {\mu} _ {t} (k) + \sqrt {2 \log t / \hat {n} _ {t} (k)} \\ \hat {\mu} _ {t} (1) - \mathrm {C I} _ {t} (1, \alpha) < \hat {\mu} _ {t} (k) + \mathrm {C I} _ {t} (k, \alpha) \end{array} \right. \\ \Longrightarrow \left\{ \begin{array}{l} \hat {\mu} _ {t} (1) - \hat {\mu} _ {t} (k) \geqslant \sqrt {2 \log t / \hat {n} _ {t} (1)} - \sqrt {2 \log t / \hat {n} _ {t} (k)} \\ \hat {\mu} _ {t} (1) - \hat {\mu} _ {t} (k) < \alpha \left(\sqrt {\log t / \hat {n} _ {t} (1)} + \sqrt {\log t / \hat {n} _ {t} (k)}\right) \end{array} \right. \\ \Longrightarrow (\sqrt {2} + \alpha) \sqrt {\log t / \hat {n} _ {t} (1)} > (\sqrt {2} - \alpha) \sqrt {\log t / \hat {n} _ {t} (k)}. \\ \end{array}
453
+ $$
454
+
455
+ After rearranging the inequality, we show that the number of observations of the optimal arm 1 is upper bounded by a factor multiplying the number of observations of a suboptimal arm as follows,
456
+
457
+ $$
458
+ \hat {n} _ {t} (1) < \left(\frac {\sqrt {2} + \alpha}{\sqrt {2} - \alpha}\right) ^ {2} \hat {n} _ {t} (k). \tag {10}
459
+ $$
460
+
461
+ Lemmas 2 and 3 together show that in the UCB-TCOM algorithm the maximum pulling times of any suboptimal arm $k$ is upper bounded as follows
462
+
463
+ $$
464
+ \hat {n} _ {t} (k) \leqslant \frac {8 \log T}{\Delta_ {k} ^ {2}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}. \tag {11}
465
+ $$
466
+
467
+ Substituting the above inequality to (10)'s RHS and choosing $k = 2$ (because $1 / \Delta_k^2$ is maximal when $k = 2$ ) for any time $t$ , we have
468
+
469
+ $$
470
+ \hat {n} _ {t} (1) < \left(\frac {\sqrt {2} + \alpha}{\sqrt {2} - \alpha}\right) ^ {2} \left(\frac {8 \log T}{\Delta_ {2} ^ {2}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}\right).
471
+ $$
472
+
473
+ Notice that whenever the optimal arm is pulled and lies inside the communication set, the counter $\hat{n}_t(1)$ increases by 1. So, the buffering counter $N_{t}(1)$ is no greater than $\hat{n}_t(1)$ . That is,
474
+
475
+ $$
476
+ N _ {t} (1) \leqslant \left(\frac {\sqrt {2} + \alpha}{\sqrt {2} - \alpha}\right) ^ {2} \left(\frac {8 \log T}{\Delta_ {2} ^ {2}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}\right).
477
+ $$
478
+
479
+ These $N_{t}(1)$ observations are aggregated and communicated at one time in each phase. These phases' lengths increase in a geometric sequence with common ratio $\beta$ . So, the number of communicated messages of the optimal arm 1 is at most
480
+
481
+ $$
482
+ \log_ {\beta} (N _ {t} (1)) \leqslant \log_ {\beta} \left(\left(\frac {\sqrt {2} + \alpha}{\sqrt {2} - \alpha}\right) ^ {2} \left(\frac {8 \log T}{\Delta_ {2} ^ {2}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}\right)\right) = O (\log (\log T)).
483
+ $$
484
+
485
+ ![](images/72fadb0c14eb554a8b3e6246262fe6f25e2f7d5ca0acac1ca5d6816194730f88.jpg)
486
+
487
+ Proof of Theorem 2(iii). We first show that, if a suboptimal arm is pulled by a Type-I decision, its reward observation will definitely be broadcast, i.e., the pulled arm is inside the communication set. This is because the pulled suboptimal arm $k$ 's tunable lower confidence bound $\mathsf{tLCB}(k,\alpha)$ is less than the optimal arm 1's tunable upper confidence bound $\mathsf{tUCB}(1,\alpha)$ : $\mathsf{tLCB}(k,\alpha) \leqslant \mu_k < \mu_1 \leqslant \mathsf{tUCB}(1,\alpha)$ .
488
+
489
+ Lemma 2 shows that when $\alpha >1$ , agents make Type-I decisions in almost all time slots (except finite Type-II decisions). Therefore, almost all observations of suboptimal arms are broadcast.
490
+
491
+ Proof of Theorem 2(iv). We first show that when the optimal arm 1 is pulled and the agent makes a Type-I decision in time slot $t$ , the event that $\mathsf{tLCB}_t(1, \alpha) > \mathsf{tLCB}_t(k, \alpha)$ , $\forall k \neq 1$ happens with small probability.
492
+
493
+ $$
494
+ \begin{array}{l} \mathbb {P} \left(\mathrm {t L C B} _ {t} (1, \alpha) > \mathrm {t L C B} _ {t} (k, \alpha), \forall k \neq 1 \mid A _ {t} = 1\right) \\ = \mathbb {P} (\hat {\mu} (1) - \mathrm {C I} _ {t} (1, \alpha) > \hat {\mu} (k) + \mathrm {C I} _ {t} (k, \alpha), \forall k \neq 1 | A _ {t} = 1) \\ \leqslant \mathbb {P} (\hat {\mu} (1) > \mathrm {C I} _ {t} (k, \alpha), \forall k \neq 1 | A _ {t} = 1) \\ \leqslant \min _ {k \neq 1} \mathbb {P} \left(\hat {\mu} (1) > \mathrm {C I} _ {t} (k, \alpha)\right) \\ = \min _ {k \neq 1} \mathbb {P} (\hat {\mu} (1) - \mu (1) > \mathrm {C I} _ {t} (k, \alpha) - \mu (1)) \tag {12} \\ \end{array}
495
+ $$
496
+
497
+ $$
498
+ \begin{array}{l} \stackrel {(a)} {\leqslant} \min _ {k \neq 1} \mathbb {P} \left(\hat {\mu} (1) - \mu (1) > \alpha \frac {\Delta_ {k}}{2 \sqrt {2}} - \mu (1)\right) \\ \stackrel {(b)} {\leqslant} \min _ {k \neq 1} \exp \left(- (\alpha \Delta_ {k} / 2 - \sqrt {2} \mu (1)) \hat {n} _ {t} (1)\right) \\ = \exp \left(- (\alpha \Delta_ {2} / 2 - \sqrt {2} \mu (1)) \hat {n} _ {t} (1)\right), \\ \end{array}
499
+ $$
500
+
501
+ where inequality (a) holds because (8) in Lemma 3's proof shows that when agent $i$ makes a Type-I decision to pull suboptimal arm $k$ , the width of the confidence interval $\mathrm{CI}$ of this arm's reward mean is no less than half of the arm's reward gap, i.e., $\sqrt{\frac{2\log t}{\hat{n}_t(k)}} \geqslant \frac{\Delta_k}{2}$ , and inequality (b) holds because $\alpha \Delta_k - 2\sqrt{2} \mu(1) > 0$ and by Hoeffding's inequality.
502
+
503
+ Next, we show that the number of times that the optimal arm 1's observations is not broadcast is finite.
504
+
505
+ $$
506
+ \begin{array}{l} \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \left\{ A _ {t} = 1 \text { and the obs. is not broadcast} \right\} \right] \\ \leqslant \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \left\{ \text {agent } i \text { makes a Type-II decision in time } t \right\} \right] + \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \left\{ A _ {t} = 1 \text { and the obs. is not broadcast and a Type-I decision is made in time } t \right\} \right] \\ \leqslant \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \{ t \text { is Type-II} \} \right] + \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \{ \mathrm {t L C B} _ {t} (1, \alpha) > \mathrm {t U C B} _ {t} (k, \alpha), \forall k \neq 1 \mid A _ {t} = 1 \} \right] \\ \leqslant \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \{ t \text { is Type-II} \} \right] + \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \{ \mathrm {t L C B} _ {t} (1, \alpha) > \mathrm {t L C B} _ {t} (k, \alpha), \forall k \neq 1 \mid A _ {t} = 1 \} \right] \\ \stackrel {(a)} {\leqslant} \mathbb {E} \left[ \sum_ {t \in [ T ]} \mathbb {1} \{ t \text { is Type-II} \} \right] + \sum_ {t \in [ T ]} \exp \left( - \left( \alpha \Delta_ {2} / 2 - \sqrt {2} \mu (1) \right) \hat {n} _ {t} (1) \right) \\ \stackrel {(b)} {\leqslant} M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + \int_ {n = 0} ^ {T} \exp \left( - (\alpha \Delta_ {2} / 2 - \sqrt {2} \mu (1)) n \right) d n \\ \leqslant M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + \frac {2}{\alpha \Delta_ {2} - 2 \sqrt {2} \mu (1)}, \end{array}
507
+ $$
508
+
509
+ where the inequality (a) is from applying (12) to the second term, and the inequality (b) bounds the number of times of Type-II decisions by Lemma 2.
510
+
511
+ # C APPLY PEELING TECHNIQUE TO RELAX THE TUNABLE PARAMETER'S CONSTRAINT
512
+
513
+ In this section, we apply peeling technique (Bubeck, 2010, §2.2) to relax Lemma 2 (thus Theorems 1 and 2(iii))'s $\alpha > 1$ constraint to $\alpha > \frac{1}{\sqrt{2}}$ . This finer result helps to move the critical point 1 in Figure 2's $\alpha$ -axis to $\frac{1}{\sqrt{2}}$ , and extend the red arrow range (with near-optimal group regret and $O(\log (\log T))$ communications) to $\left(\frac{1}{\sqrt{2}}, \sqrt{2}\right)$ .
514
+
515
+ Lemma 4 (Bound the number of times of Type-II decisions via peeling technique). Lemma 2's condition $\alpha > 1$ can be relaxed to $\alpha > \sqrt{\frac{1}{2\gamma}}$ via peeling technique for $\gamma \in (0,1)$ . The total number of Type-II decisions is no greater than
516
+
517
+ $$
518
+ 2 M K \left(\frac {2 \alpha^ {2} \gamma + 1}{2 \alpha^ {2} \gamma - 1} + \frac {1}{\log (1 / \gamma) (2 \alpha^ {2} \gamma - 1) ^ {2}}\right).
519
+ $$
520
+
521
+ Proof. The key idea is applying a peeling argument to enhance Lemma 1's proof. Denote $\gamma \in (0,1)$ . For any $s \leqslant t$ , there exists $j \in \left\{0,1,\dots, \lceil \frac{\log t}{\log(1 / \gamma)} \rceil \right\}$ such that $\gamma^{j+1} t < s \leqslant \gamma^{j} t$ . Then, we have
522
+
523
+ $$
524
+ \begin{array}{l} \mathbb {P} (\mu (k) \geqslant \mathsf {t U C B} _ {t} (k, \alpha)) = \mathbb {P} \left(\mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}}\right) \\ \leqslant \mathbb {P} \left(\exists s \leqslant t: \mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}} \,\Bigg|\, \hat {n} _ {t} (k) = s\right) \\ \leqslant \sum_ {j = 0} ^ {\left\lceil \frac {\log t}{\log (1 / \gamma)} \right\rceil} \mathbb {P} \left(\exists s \in \left(\gamma^ {j + 1} t, \gamma^ {j} t \right]: \mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\hat {n} _ {t} (k)}} \,\Bigg|\, \hat {n} _ {t} (k) = s\right) \\ \leqslant \sum_ {j = 0} ^ {\left\lceil \frac {\log t}{\log (1 / \gamma)} \right\rceil} \mathbb {P} \left(\exists s \in \left(\gamma^ {j + 1} t, \gamma^ {j} t \right]: \mu (k) - \hat {\mu} _ {t} (k) \geqslant \alpha \sqrt {\frac {\log t}{\gamma^ {j} t}} \,\Bigg|\, \hat {n} _ {t} (k) = s\right) \\ \stackrel {(a)} {\leqslant} \sum_ {j = 0} ^ {\left\lceil \frac {\log t}{\log (1 / \gamma)} \right\rceil} \exp \left(- \frac {\alpha^ {2} \log t / (\gamma^ {j} t)}{2 \times (1 / (4 \gamma^ {j + 1} t))}\right) \\ \leqslant \sum_ {j = 0} ^ {\left\lceil \frac {\log t}{\log (1 / \gamma)} \right\rceil} \exp \left(- 2 \alpha^ {2} \gamma \log t\right) \\ \leqslant \left(2 + \frac {\log t}{\log (1 / \gamma)}\right) t ^ {- 2 \alpha^ {2} \gamma}, \end{array}
525
+ $$
526
+
527
+ where the inequality (a) is from the maximal Hoeffding's inequality. Symmetrically, we also have
528
+
529
+ $$
530
+ \mathbb {P} \left(\mu (k) \leqslant \mathrm {t L C B} _ {t} (k, \alpha)\right) \leqslant \left(2 + \frac {\log t}{\log (1 / \gamma)}\right) t ^ {- 2 \alpha^ {2} \gamma}.
531
+ $$
532
+
533
+ Together, we have
534
+
535
+ $$
536
+ \mathbb {P} \left(\mu_ {k} \notin \left(\mathrm {t L C B} _ {t} (k, \alpha), \mathrm {t U C B} _ {t} (k, \alpha)\right)\right) \leqslant 2 \left(2 + \frac {\log t}{\log (1 / \gamma)}\right) t ^ {- 2 \alpha^ {2} \gamma}.
537
+ $$
538
+
539
+ ![](images/130fa8b7f8a62d7e1e0cbbe23ec4c80ee394e8666cac412c245e45db21ebd589.jpg)
540
+ Figure 2: Impact of parameter $\alpha$ on UCB-TCOM's communication time $\mathbb{E}\left[\mathrm{C_T}(\mathcal{A})\right]$ and group regret $\mathbb{E}[\mathrm{R_T}(\mathcal{A})]$ . When $\alpha \in (1, \sqrt{2})$ , UCB-TCOM achieves the near-optimal group and individual regret upper bounds with $O(\log (\log T))$ communications.
541
+
542
+ Substituting Lemma 2's proof's third step with the above tighter inequality, we have
543
+
544
+ $$
545
+ \begin{array}{l} \sum_ {t = 1} ^ {T} \sum_ {i = 1} ^ {M} \sum_ {k = 1} ^ {K} \mathbb {P} \left(\mu_ {k} \notin \left(\mathsf {t L C B} _ {t} (k, \alpha), \mathsf {t U C B} _ {t} (k, \alpha)\right)\right) \\ \leqslant M K \sum_ {t = 1} ^ {T} \mathbb {P} \left(\mu_ {k} \notin \left(\mathsf {t L C B} _ {t} (k, \alpha), \mathsf {t U C B} _ {t} (k, \alpha)\right)\right) \\ \leqslant 2 M K \sum_ {t = 1} ^ {T} \left(2 + \frac {\log t}{\log (1 / \gamma)}\right) t ^ {- 2 \alpha^ {2} \gamma} \\ \leqslant 2 M K \left(1 + \int_ {t = 1} ^ {T} \left(2 + \frac {\log t}{\log (1 / \gamma)}\right) t ^ {- 2 \alpha^ {2} \gamma} d t\right) \\ \leqslant 2 M K \left(\frac {2 \alpha^ {2} \gamma + 1}{2 \alpha^ {2} \gamma - 1} + \frac {1}{\log (1 / \gamma) (2 \alpha^ {2} \gamma - 1) ^ {2}}\right), \end{array}
546
+ $$
547
+
548
+ where the last inequality requires the existence of the integral, i.e., $2\alpha^{2}\gamma > 1$, from which one has $\alpha > \sqrt{\frac{1}{2\gamma}}$, and this threshold becomes $\sqrt{\frac{1}{2}}$ as $\gamma \to 1$.
549
+
550
+ # D TUNABILITY OF TCOM
551
+
552
+ # D.1 ALGORITHM DESIGN DISCUSSION
553
+
554
+ The parameter $\alpha$ tunes the aggressiveness of identifying an arm as suboptimal. When the condition is aggressive ( $\alpha$ is small), the communication arm set $\mathcal{C}_t^{(i)}(\alpha)$ is small: most arms inside $\mathcal{C}_t^{(i)}(\alpha)$ are suboptimal with high probability, but there may exist some suboptimal arms outside $\mathcal{C}_t^{(i)}(\alpha)$ , i.e., $\mathcal{C}_t^{(i)}(\alpha)$ fails to cover a subset of suboptimal arms. Therefore, the observations of those suboptimal arms outside the communication arm set cannot be shared, which causes more explorations on them. That is, the cooperative algorithm's regret may be large when using a communication arm set with a small $\alpha$ . When the condition is conservative ( $\alpha$ is large), $\mathcal{C}_t^{(i)}(\alpha)$ tends to be large: it contains not only most suboptimal arms but sometimes the optimal arm as well. As the observation times of the optimal arm are also broadcast, the communication times would be large when $\alpha$ is large.
555
+
556
+ The parameter $\beta$ controls the frequency of communicating an arm's observation. When $\beta$ is large, communication times reduce at the expense of longer delays before other agents receive the observations. Therefore, the group regret increases. And vice versa when $\beta$ decreases.
557
+
558
+ # D.2 THEORETICAL DISCUSSION
559
+
560
+ Impact of communication parameter $\alpha$ . (1) Theorem 2(i) shows that when $\alpha \leq -\sqrt{2}$ , there is no communication among agents and thus there are no global observations for agents to optimize their decisions. (2) Theorems 2(iii) and 2(iv) show that when $\alpha > \frac{2\sqrt{2}\mu(1)}{\Delta_2} (>\sqrt{2})$ almost all
561
+
562
+ ![](images/703ed298c801ac33b30cead8685210618796d0d40600a71413f325c278b84567.jpg)
563
+ (a) Varying $\alpha$ (given $\beta = 2$ )
564
+
565
+ ![](images/09d88432a59e21da0f4aa235c78c04171656ea6613a5df35343042e80816cc9d.jpg)
566
+ (b) Varying $\beta$ (given $\alpha = 1.2$ )
567
+ Figure 3: Impact of the communication set parameter $\alpha$ with fixed $\beta = 2$ in Figure 3a; and of the buffering ratio $\beta$ with fixed $\alpha = 1.2$ in Figure 3b
568
+
569
+ reward observations are broadcast. Hence the number of global observations that were broadcast is similar to that of total observations of agents. (3) Theorem 2(ii) shows that when $-\sqrt{2} < \alpha < \sqrt{2}$ , the number of broadcasts of the optimal arm's observations is $O(\log (\log T))$ . Additionally, the observation buffering mechanism also aggregates the suboptimal arms' $O(\log T)$ observations into $O(\log (\log T))$ broadcasts. So, in this case, UCB-TCOM only incurs $O(\log (\log T))$ communication time. (4) Combined with (3), Theorem 1 shows that when $1 < \alpha < \sqrt{2}$ , UCB-TCOM has near-optimal group and individual regret upper bounds with only $O(\log (\log T))$ communication cost. We refer to the interval $(1,\sqrt{2})$ as the preferable range for $\alpha$ . Figure 2 summarizes the impact of parameter $\alpha$ on the regret-communication trade-off.
570
+
571
+ Impact of observation buffering ratio $\beta$ . In addition to $\alpha$ , the parameter $\beta$ can also be used to tune the regret-communication trade-off. When $\beta$ increases, Theorem 1 shows that both group and individual regrets increase. At the same time, note that $\beta$ is the logarithm base in (6) of Theorem 2(ii), which means the communication times of TCOM decrease as $\beta$ increases. Similarly, the regrets decrease and communication times increase as the ratio $\beta$ decreases.
572
+
573
+ # D.3 SIMULATIONS ON TUNABILITY
574
+
575
+ Figure 3 illustrates the impact of the communication set parameter $\alpha$ and the observation buffering ratio $\beta$ on the performance of UCB-TCOM. From the regret aspect, Figure 3a shows that both the group and individual regrets do not change much as $\alpha$ increases, which confirms the regret upper bounds in Theorem 1 where $\alpha$ does not appear in the dominating logarithmic term of (4) and (5). Figure 3b shows that the regrets increase with respect to $\beta$ , which corresponds to the appearance of $\beta$ in the dominating logarithmic term of the regret bounds in Theorem 1. From the communication aspect, Figures 3a and 3b also corroborate (6) of Theorem 2, where the communication times increase with respect to $\alpha$ and decrease with respect to $\beta$ .
576
+
577
+ # E SIMULATIONS ON THE PERFORMANCE OF UCB-TCOM IN DIFFERENT ENVIRONMENTS
578
+
579
+ In Figures 4a and 4b, we vary the number of agents $M$ in $\{5,25,45,65,85,105\}$ and the number of arms $K$ in $\{10,20,40,60,80,100\}$ respectively while fixing other default values. The group regret in Figure 4a is flat (compared to Figure 4b's group regret), corroborating that it does not increase rapidly with respect to number of agents $M$ (discussed in Section 4.2(b), also note that the second term in (4) does not depend on $T$ ). Its decreasing individual regret curve corroborates the scalability advantage of the multi-agent system — the more agents in the system, the smaller the maximum individual cost these agents need to pay (discussed in Section 4.2(c)). Figure 4b shows that both
580
+
581
+ Algorithm 2 The TS-TCOM Algorithm (for each agent)
582
+ 1: Input: the communication arm set parameter $\alpha$ and buffering ratio $\beta$
583
+ 2: Initialization: $\hat{n}_t(k) = 0, N_t(k) = 0, \hat{\mu}_t(k) = 0, \tau_t(k) = 0$
584
+ 3: for each decision round $t$ do
585
+ 4: For each arm $k \in \mathcal{K}$ , sample $\theta_t(k)$ from the Beta $(\hat{n}_t(k) \hat{\mu}_t(k) + 1, \hat{n}_t(k) + 1)$ distribution.
586
+ 5: Pull arm $A_t$ with the highest $\theta_t(k)$
587
+ 6: Observe arm $A_t$ ’s reward $X_t(A_t)$
588
+ 7: if $A_t \in \mathcal{C}_t(\alpha)$ then
589
+ 8: Increase $N_t(A_t)$ by 1
590
+ 9: Update this phase's empirical mean $\tilde{\mu}_t(A_t)$
591
+ 10: end if
592
+ 11: if $N_t(A_t) \geqslant \lceil \beta N_{\tau_t(A_t)}(A_t) \rceil$ then
593
+ 12: Broadcast the message $(\tilde{\mu}_t(A_t), N_t(A_t), A_t)$
594
+ 13: $\tau_t(A_t) \gets t$
595
+ 14: end if
596
+ 15: end for
597
+ 16: for each newly received message $(\tilde{\mu}_t(k), N_t(k), k)$ from the past decision round do
598
+ 17: Update the empirical mean $\hat{\mu}_t(k) \gets \frac{\hat{\mu}_t(k) \hat{n}_t(k) + \tilde{\mu}_t(k) \lfloor N_t(k)(1 - 1/\beta) \rfloor}{\hat{n}_t(k) + \lfloor N_t(k)(1 - 1/\beta) \rfloor}$
599
+ 18: Increase $\hat{n}_t(k)$ by $\lfloor N_t(k)(1 - 1/\beta) \rfloor$
600
+ 19: Update the communication arm set $\mathcal{C}_t(\alpha)$ via (3) based on tunable confidence bounds
601
+ 20: end for
602
+
603
+ group and individual regrets, and communication cost increase linearly as the number of arms $K$ increases, which signifies the linear dependence of regret on the number of arms.
604
+
605
+ ![](images/27d069351bfd2f94f4f960075fec150d4f2253d6a24b300c6c715afba0daa4a0.jpg)
606
+ (a) Different # agents $M$ $(K = 20)$
607
+
608
+ ![](images/9f502ceec1bc0d454e338448e0e60c3f81546c27483c7b668ef2554a88034e9b.jpg)
609
+ (b) Different # arms $K$ ( $M = 25$ )
610
+ Figure 4: The performance of UCB-TCOM with different number of agents and arms
611
+
612
+ # F APPLY TCOM TO OTHER BANDITS ALGORITHMS
613
+
614
+ In this section, we illustrate how TCOM can be applied to other bandits algorithms. Specifically, we devise the TS-TCOM algorithm based on the Thompson sampling (TS) algorithm (Agrawal & Goyal, 2012) and the AAE-TCOM algorithm based on the active arm elimination (AAE) algorithm (Even-Dar et al., 2006).
615
+
616
+ For the ease of presenting TS-TCOM, we restrict our reward distributions to Bernoulli (same as our simulation setting) and assume the priors of all arms' rewards are $\mathrm{Beta}(1,1)$ . Then, at time $t$ , the arm $k$ 's reward posterior distribution is $\mathrm{Beta}(\hat{n}_t(k)\hat{\mu}_t(k) + 1,\hat{n}_t(k) + 1)$ , where $\hat{n}_t(k)$ and $\hat{\mu}_t(k)$ are the global pulling times and the global reward mean estimate of arm $k$ respectively. We present TS-TCOM in Algorithm 2. The only difference between TS-TCOM and UCB-TCOM is in Lines 4-5.
617
+
618
+ In AAE-TCOM, agents need to construct a global candidate arm set $\mathcal{D}_t$ which is defined as follows,
619
+
620
+ $$
621
+ \mathcal {D} _ {t} := \left\{k \in [ K ]: \mathrm {U C B} _ {t} (k) \geqslant \mathrm {L C B} _ {t} \left(k ^ {\prime}\right), \forall k ^ {\prime} \in [ K ] \right\}, \tag {13}
622
+ $$
623
+
624
+ Algorithm 3 The AAE-TCOM Algorithm (for each agent)
625
+ 1: Input: the communication arm set parameter $\alpha$ and buffering ratio $\beta$
626
+ 2: Initialization: $\hat{n}_t(k) = 0, N_t(k) = 0, \hat{\mu}_t(k) = 0, \tau_t(k) = 0$
627
+ 3: for each decision round $t$ do
628
+ 4: Update candidate arm set $\mathcal{D}_t$ via (13)
629
+ 5: Pull arm $A_t$ with the smallest global observation times $\hat{n}_t(k)$ among arms in $\mathcal{D}_t$
630
+ 6: Observe arm $A_t$ 's reward $X_t(A_t)$
631
+ 7: if $A_t \in \mathcal{C}_t(\alpha)$ then
632
+ 8: Increase $N_t(A_t)$ by 1
633
+ 9: Update this phase's empirical mean $\tilde{\mu}_t(A_t)$
634
+ 10: end if
635
+ 11: if $N_t(A_t) \geqslant \lceil \beta N_{\tau_t(A_t)}(A_t) \rceil$ then
636
+ 12: Broadcast the message ( $\tilde{\mu}_t(A_t), N_t(A_t), A_t$ )
637
+ 13: $\tau_t(A_t) \gets t$
638
+ 14: end if
639
+ 15: end for
640
+ 16: for each newly received message ( $\tilde{\mu}_t(k), N_t(k), k$ ) from the past decision round do
641
+ 17: Update the empirical mean $\hat{\mu}_t(k) \gets \frac{\hat{\mu}_t(k)\hat{n}_t(k) + \tilde{\mu}_t(k)\lfloor N_t(k)(1 - 1/\beta)\rfloor}{\hat{n}_t(k) + \lfloor N_t(k)(1 - 1/\beta)\rfloor}$
642
+ 18: Increase $\hat{n}_t(k)$ by $\lfloor N_t(k)(1 - 1/\beta)\rfloor$
643
+ 19: Update the communication arm set $\mathcal{C}_t(\alpha)$ via (3) based on tunable confidence bounds
644
+ 20: end for
645
+
646
+ ![](images/d826f503752f364494625e70a98265900312da77650d68e89285b8e3aa904cd6.jpg)
647
+ (a) Group regret
648
+
649
+ ![](images/3dfe7bf3a274a4f9b8307580cc22daf03ec6cca4433b533ecb4b4f60f713a0ab.jpg)
650
+ (b) Individual regret
651
+
652
+ ![](images/f553b0321baf11c3d1b33eb74d1ad45255389169e121f1a71484ca172c83314f.jpg)
653
+ (c) Communications
654
+ Figure 5: UCB-TCOM vs. AAE-TCOM, TS-TCOM
655
+
656
+ where $\mathrm{UCB}_t(k) \coloneqq \hat{\mu}_t(k) + \sqrt{2\log t / \hat{n}_t(k)}$ is the same as in UCB-TCOM, and $\mathrm{LCB}_t(k) \coloneqq \hat{\mu}_t(k) - \sqrt{2\log t / \hat{n}_t(k)}$ . We note that since both $\hat{\mu}_t(k)$ and $\hat{n}_t(k)$ are global, all agents' candidate arm sets are the same. Then, at each time slot $t$ , agents pull the arm with the smallest number of observations among all arms in the candidate arm set, i.e., $\arg \min_{k \in \mathcal{D}_t} \hat{n}_t(k)$ . We present AAE-TCOM in Algorithm 3. The difference between AAE-TCOM and UCB-TCOM is also in Lines 4-5.
657
+
658
+ In Figure 5, we report the performance comparison between UCB-TCOM, TS-TCOM, and AAE-TCOM. The simulations are conducted under the default setting in §5. In Figures 5a and 5b, the AAE-TCOM algorithm has worse regret performance than UCB-TCOM, and UCB-TCOM is worse than TS-TCOM. This matches the folklore in bandits that the empirical performance of TS is better than that of UCB, and UCB is better than AAE. Comparing the individual regret in Figure 5b to the group regret in Figure 5a shows that the AAE-TCOM and TS-TCOM algorithms have good individual regret performance. Figure 5c shows that these three algorithms need similar communication times, which validates the communication efficiency of TS-TCOM and AAE-TCOM.
659
+
660
+ We believe that the TS-TCOM and AAE-TCOM algorithms (like UCB-TCOM) also enjoy the $O(\log (\log T))$ communication times and near-optimal group and individual regrets. However, this requires new analysis, especially for the Thompson sampling case, because its Bayesian approach is very different from the method utilized in UCB and TS-TCOM needs to validate the symmetric learning structure. Studying both new algorithms' theoretical performance is an interesting future work.
661
+
662
+ # G EXTENSION OF TCOM TO CMA2B WITH COMMUNICATION WITH DELAYS AND COMMUNICATION OVER A GRAPH
663
+
664
+ In this section, we extend the basic CMA2B model in §2 to allow deterministic communication delays, and also to extend the setting from one where agents can broadcast to all others to one where agents residing on a network can only communicate with their neighbors (i.e., cannot broadcast). In the communication network model, we also allow agents to pass their received messages on to other neighbors, i.e., we allow message-passing. We show that UCB-TCOM (with minor changes) still enjoys the near-optimal group and individual regret upper bounds as in Theorem 1 with only a constant additional cost (independent of $T$ ). We also note that the communication times results in Theorem 2 still hold because their proofs are not influenced by these extensions.
665
+
666
+ # G.1 DETERMINISTIC DELAY
667
+
668
+ Denote $d \in \mathbb{N}^{+}$ as the deterministic delay of agents' message broadcasts. The deterministic delay can also be relaxed to random delay but with the $d$ as the delay upper bound. Algorithmically, the global UCB utilized for selecting arms to pull should be re-defined to those observations that have been broadcast $d$ time slots before. Because only these reward observations have been received by all agents even with the communication delay and thus agents utilize the same observations to make decisions.
669
+
670
+ Theorem 3. When the communication arm set parameter $\alpha > 1$ and buffering-ratio $\beta > 1$ , given all delays of communication is no greater than $d$ , UCB-TCOM attains a near-optimal group regret upper bound in terms of number of decision rounds $T$ , arms $K$ , and agents $M$ , or formally,
671
+
672
+ $$
673
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + d M \sum_ {k > 1} \Delta_ {k}
674
+ $$
675
+
676
+ and UCB-TCOM also attains a near-optimal individual regret upper bound, or formally,
677
+
678
+ $$
679
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\text {i n d}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{M \Delta_ {k}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + d \sum_ {k > 1} \Delta_ {k}.
680
+ $$
681
+
682
+ Proof of Theorem 3. The proof procedure of Theorem 1 still applies to Theorem 3. The only change is that the inequality (9) should be updated as follows,
683
+
684
+ $$
685
+ \sum_ {i \in [ M ]} N _ {t} ^ {i} (k) \stackrel {(a)} {\leqslant} \sum_ {i \in [ M ]} (\beta N _ {\tau_ {t} ^ {i} (k)} ^ {i} (k) + d) \leqslant \beta \sum_ {i \in [ M ]} N _ {\tau_ {t} (k)} (k) + d M \stackrel {(b)} {\leqslant} \frac {8 \beta \log T}{\Delta_ {k} ^ {2}} + d M,
686
+ $$
687
+
688
+ where inequality (a) is because the counter $N_{t}(k)$ is at most $\beta$ times greater than its value at the last broadcast time slot plus the delay $d$ , and inequality (b) is because $\sum_{i\in [M]}N_{\tau_t(k)}(k)$ is total global observations of arm $k$ at time slot $t$ which is less than $8\Delta_k^{-2}\log T$ by Lemma 3.
689
+
690
+ With the rest of the proof the same as Theorem 1's, UCB-TCOM's group regret is upper bounded as follows:
691
+
692
+ $$
693
+ \mathbb {E} [ \mathrm {R} _ {\mathrm {T}} (\mathcal {A}) ] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + d M \sum_ {k > 1} \Delta_ {k} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}.
694
+ $$
695
+
696
+ As the symmetry of UCB-TCOM still holds, the individual regret upper bound immediately follows,
697
+
698
+ $$
699
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\text {i n d}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{M \Delta_ {k}} + d \sum_ {k > 1} \Delta_ {k} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1}.
700
+ $$
701
+
702
+ ![](images/f182c0a1c8789e0c1ec9755ae57a3470e49fd2cfcb8c7173d7a9ced4a5d862ec.jpg)
703
+
704
+ # G.2 COMMUNICATION TOPOLOGY: PEER-TO-PEER ANDMESSAGE PASSING COMMUNICATION
705
+
706
+ We assume the communication network is a connected graph, i.e., there exists a path between any two nodes, and the diameter of the graph is $D$ . In the message-passing protocol (following Dubey
707
+
708
+ et al. (2020)'s model), agents $i\in [M]$ communicate via messages $(i,t,\tilde{\mu}_t^i (A_t^i),N_t^i (A_t^i),A_t^i)$ . A message is sent to the sender's neighbors in the graph, and is then forwarded by any agent that receives it to its own neighbors, until time slot $t + D$ . Under this protocol, a broadcast message will finally reach every agent with a delay of at most $D$ . In the algorithmic aspect, the TCOM algorithm still works with minor modifications:
709
+
710
+ 1. Change the Line 11 in Algorithm 1 from broadcast to communication to neighbors;
711
+ 2. Add a line after Line 11 specifying that an agent should also forward all its received messages with time index $t' > t - D$ to its neighbors on the graph;
712
+ 3. Line 15 should be changed to receive all messages that have not been received before (i.e., with unique $i, t$ prefix in the message).
713
+
714
+ With the above modifications in UCB-TCOM, the algorithm solves a problem equivalent to a CMA2B with communication delay $D$ on a complete graph. Therefore, based on Theorem 3, one obtains the following corollary to bound the group and individual regrets of the modified UCB-TCOM algorithm.
715
+
716
+ Corollary 4. When the communication arm set parameter $\alpha > 1$ and buffering-ratio $\beta > 1$ , given all agents residing on a network with diameter $D$ , UCB-TCOM attains a near-optimal group regret upper bound in terms of number of decision rounds $T$ , arms $K$ , and agents $M$ , or formally,
717
+
718
+ $$
719
+ \mathbb {E} [ \mathrm {R} _ {\mathrm {T}} (\mathcal {A}) ] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{\Delta_ {k}} + M K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + D M \sum_ {k > 1} \Delta_ {k}
720
+ $$
721
+
722
+ and UCB-TCOM also attains a near-optimal individual regret upper bound, or formally,
723
+
724
+ $$
725
+ \mathbb {E} \left[ \mathrm {R} _ {\mathrm {T}} ^ {\text {i n d}} (\mathcal {A}) \right] \leqslant \sum_ {k > 1} \frac {8 \beta \log T}{M \Delta_ {k}} + K \frac {2 \alpha^ {2} - 1}{\alpha^ {2} - 1} + D \sum_ {k > 1} \Delta_ {k}.
726
+ $$
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e870cfa5a3e1142ee09d95cc2e47908fcee9032f8bdfa33ef5ddfd4e8ad4f3f
3
+ size 861928
achievingnearoptimalindividualregretlowcommunicationsinmultiagentbandits/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec94ce0f51390c7b1ed0f1f5bc3df1ad603fa19b7e3ea55ed7bb9ebae7bc7153
3
+ size 1004754
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8948f71aded7dc572313d9e4bccc738edc5a5f47e0f74f14b3c67c0a5d4db3c
3
+ size 368480
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32776b0946450c33056d196216ef76a869a9c2793063618ebe0fee5ad94199dd
3
+ size 417672
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/16555d2a-4150-4da8-9d23-69d64ca664ee_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50dc78305fb013133de5f1a107b64a459ab33f48f189b11f687de1614d6f7a4a
3
+ size 727221
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/full.md ADDED
The diff for this file is too large to render. See raw diff
 
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:803a14b10b13cee598f099b2bbad08babfe924e7dbd0c667d168b855dbc443cc
3
+ size 2602196
achievingsublinearregretininfinitehorizonaveragerewardconstrainedmdpwithlinearfunctionapproximation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a30c084110c367e5261688b16f48e7c382c334327c99f4ca3bbb30da93dd316b
3
+ size 2293878
acriticallookattheevaluationofgnnsunderheterophilyarewereallymakingprogress/3e8811ba-7259-4c07-bd30-6f066f89108e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02b4d9aa52d6f4e81a0ef4042676be7b53b2669fa49bdfaca603f080bab87a0b
3
+ size 88074
acriticallookattheevaluationofgnnsunderheterophilyarewereallymakingprogress/3e8811ba-7259-4c07-bd30-6f066f89108e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46175201cf6943e09bdd199c09597f8d24ac47c4d59129a118408771aeb32048
3
+ size 106529