SlowGuess committed on
Commit
672566c
·
verified ·
1 Parent(s): ab68ce3

Add Batch 0d964477-6c06-48ba-974a-2d50656fed51

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_content_list.json +3 -0
  2. abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_model.json +3 -0
  3. abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_origin.pdf +3 -0
  4. abaselineforfewshotimageclassification/full.md +449 -0
  5. abaselineforfewshotimageclassification/images.zip +3 -0
  6. abaselineforfewshotimageclassification/layout.json +3 -0
  7. abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_content_list.json +3 -0
  8. abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_model.json +3 -0
  9. abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_origin.pdf +3 -0
  10. abductivecommonsensereasoning/full.md +379 -0
  11. abductivecommonsensereasoning/images.zip +3 -0
  12. abductivecommonsensereasoning/layout.json +3 -0
  13. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_content_list.json +3 -0
  14. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_model.json +3 -0
  15. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_origin.pdf +3 -0
  16. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/full.md +317 -0
  17. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/images.zip +3 -0
  18. abstractdiagrammaticreasoningwithmultiplexgraphnetworks/layout.json +3 -0
  19. acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_content_list.json +3 -0
  20. acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_model.json +3 -0
  21. acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_origin.pdf +3 -0
  22. acceleratingsgdwithmomentumforoverparameterizedlearning/full.md +0 -0
  23. acceleratingsgdwithmomentumforoverparameterizedlearning/images.zip +3 -0
  24. acceleratingsgdwithmomentumforoverparameterizedlearning/layout.json +3 -0
  25. acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_content_list.json +3 -0
  26. acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_model.json +3 -0
  27. acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_origin.pdf +3 -0
  28. acloserlookatdeeppolicygradients/full.md +475 -0
  29. acloserlookatdeeppolicygradients/images.zip +3 -0
  30. acloserlookatdeeppolicygradients/layout.json +3 -0
  31. acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_content_list.json +3 -0
  32. acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_model.json +3 -0
  33. acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_origin.pdf +3 -0
  34. acloserlookattheapproximationcapabilitiesofneuralnetworks/full.md +523 -0
  35. acloserlookattheapproximationcapabilitiesofneuralnetworks/images.zip +3 -0
  36. acloserlookattheapproximationcapabilitiesofneuralnetworks/layout.json +3 -0
  37. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_content_list.json +3 -0
  38. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_model.json +3 -0
  39. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_origin.pdf +3 -0
  40. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/full.md +529 -0
  41. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/images.zip +3 -0
  42. acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/layout.json +3 -0
  43. aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_content_list.json +3 -0
  44. aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_model.json +3 -0
  45. aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_origin.pdf +3 -0
  46. aconstructivepredictionofthegeneralizationerroracrossscales/full.md +521 -0
  47. aconstructivepredictionofthegeneralizationerroracrossscales/images.zip +3 -0
  48. aconstructivepredictionofthegeneralizationerroracrossscales/layout.json +3 -0
  49. acriticalanalysisofselfsupervisionorwhatwecanlearnfromasingleimage/34fda455-8a2c-410c-a1d4-5e379b1bc611_content_list.json +3 -0
  50. acriticalanalysisofselfsupervisionorwhatwecanlearnfromasingleimage/34fda455-8a2c-410c-a1d4-5e379b1bc611_model.json +3 -0
abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5a7b00e7c1abbb06197452df2d9907e5f4a2988d02a4aea979f4b253e32ff1b8
3
+ size 131571
abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6482341fc7a23f9a45baa50b222d536864557bb5e0d041ab3a2faf1ee42bd30b
3
+ size 156802
abaselineforfewshotimageclassification/55dfb78c-2c61-48ac-89df-52fe1bd63d06_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c89bdc9a29b33b08b8edda69c10beb718fb0f205f601586c1eccd6aec08a4a5c
3
+ size 875211
abaselineforfewshotimageclassification/full.md ADDED
@@ -0,0 +1,449 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A BASELINE FOR FEW-SHOT IMAGE CLASSIFICATION
2
+
3
+ Guneet S. Dhillon<sup>1</sup>, Pratik Chaudhari<sup>2*</sup>, Avinash Ravichandran<sup>1</sup>, Stefano Soatto<sup>1,3</sup>
4
+
5
+ <sup>1</sup>Amazon Web Services, <sup>2</sup>University of Pennsylvania, <sup>3</sup>University of California, Los Angeles {guneetsd, ravinash, soattos} @amazon.com, pratikac@seas.upenn.edu
6
+
7
+ # ABSTRACT
8
+
9
+ Fine-tuning a deep network trained with the standard cross-entropy loss is a strong baseline for few-shot learning. When fine-tuned transductively, this outperforms the current state-of-the-art on standard datasets such as Mini-ImageNet, Tiered-ImageNet, CIFAR-FS and FC-100 with the same hyper-parameters. The simplicity of this approach enables us to demonstrate the first few-shot learning results on the ImageNet-21k dataset. We find that using a large number of meta-training classes results in high few-shot accuracies even for a large number of few-shot classes. We do not advocate our approach as the solution for few-shot learning, but simply use the results to highlight limitations of current benchmarks and few-shot protocols. We perform extensive studies on benchmark datasets to propose a metric that quantifies the "hardness" of a few-shot episode. This metric can be used to report the performance of few-shot algorithms in a more systematic way.
10
+
11
+ # 1 INTRODUCTION
12
+
13
+ ![](images/4d1940ced8cbaf3f9cf8ec6cf63097ad961d8c957d612360479ab3f7ab346aff.jpg)
14
+ Figure 1: Are we making progress? The box-plot illustrates the performance of state-of-the-art few-shot algorithms on the Mini-ImageNet (Vinyals et al., 2016) dataset for the 1-shot 5-way protocol. The boxes show the $\pm 25\%$ quantiles of the accuracy while the notches indicate the median and its $95\%$ confidence interval. Whiskers denote the $1.5\times$ interquartile range which captures $99.3\%$ of the probability mass for a normal distribution. The spread of the box-plots is large, indicating that the standard deviations of the few-shot accuracies are large too. This suggests that progress may be illusory, especially considering that none outperform the simple transductive fine-tuning baseline discussed in this paper (rightmost).
15
+
16
+ As image classification systems begin to tackle more and more classes, the cost of annotating a massive number of images and the difficulty of procuring images of rare categories increases. This has fueled interest in few-shot learning, where only few labeled samples per class are available for training. Fig. 1 displays a snapshot of the state-of-the-art. We estimated this plot by using published
17
+
18
+ numbers for the estimate of the mean accuracy, the $95\%$ confidence interval of this estimate and the number of few-shot episodes. For MAML (Finn et al., 2017) and MetaOpt SVM (Lee et al., 2019), we use the number of episodes in the author's Github implementation.
19
+
20
+ The field appears to be progressing steadily albeit slowly based on Fig. 1. However, the variance of the estimate of the mean accuracy is not the same as the variance of the accuracy. The former can be zero (e.g., asymptotically for an unbiased estimator), yet the latter could be arbitrarily large. The variance of the accuracies is extremely large in Fig. 1. This suggests that progress in the past few years may be less significant than it seems if one only looks at the mean accuracies. To compound the problem, many algorithms report results using different models for different number of ways (classes) and shots (number of labeled samples per class), with aggressive hyper-parameter optimization. Our goal is to develop a simple baseline for few-shot learning, one that does not require specialized training depending on the number of ways or shots, nor hyper-parameter tuning for different protocols.
21
+
22
+ The simplest baseline we can think of is to pre-train a model on the meta-training dataset using the standard cross-entropy loss, and then fine-tune on the few-shot dataset. Although this approach is basic and has been considered before (Vinyals et al., 2016; Chen et al., 2018), it has gone unnoticed that it outperforms many sophisticated few-shot algorithms. Indeed, with a small twist of performing fine-tuning transductively, this baseline outperforms all state-of-the-art algorithms on all standard benchmarks and few-shot protocols (cf. Table 1).
23
+
24
+ Our contribution is to develop a transductive fine-tuning baseline for few-shot learning; our approach works even for a single labeled example and a single test datum per class. Our baseline outperforms the state-of-the-art on a variety of benchmark datasets such as Mini-ImageNet (Vinyals et al., 2016), Tiered-ImageNet (Ren et al., 2018), CIFAR-FS (Bertinetto et al., 2018) and FC-100 (Oreshkin et al., 2018), all with the same hyper-parameters. Current approaches to few-shot learning are hard to scale to large datasets. We report the first few-shot learning results on the ImageNet-21k dataset (Deng et al., 2009) which contains 14.2 million images across 21,814 classes. The rare classes in ImageNet-21k form a natural benchmark for few-shot learning.
25
+
26
+ The empirical performance of this baseline should not be understood as us suggesting that this is the right way of performing few-shot learning. We believe that sophisticated meta-training, understanding taxonomies and meronomies, transfer learning, and domain adaptation are necessary for effective few-shot learning. The performance of the simple baseline however indicates that we need to interpret existing results<sup>2</sup> with a grain of salt, and be wary of methods that tailor to the benchmark. To facilitate that, we propose a metric to quantify the hardness of few-shot episodes and a way to systematically report performance for different few-shot protocols.
27
+
28
+ # 2 PROBLEM DEFINITION AND RELATED WORK
29
+
30
+ We first introduce some notation and formalize the few-shot image classification problem. Let $(x,y)$ denote an image and its ground-truth label respectively. The training and test datasets are $\mathcal{D}_{\mathrm{s}} = \{(x_i,y_i)\}_{i=1}^{N_{\mathrm{s}}}$ and $\mathcal{D}_{\mathrm{q}} = \{(x_i,y_i)\}_{i=1}^{N_{\mathrm{q}}}$ respectively, where $y_i \in C_{\mathrm{t}}$ for some set of classes $C_{\mathrm{t}}$ . In the few-shot learning literature, training and test datasets are referred to as support and query datasets respectively, and are collectively called a few-shot episode. The number of ways, or classes, is $|C_{\mathrm{t}}|$ . The set $\{x_i \mid y_i = k, (x_i,y_i) \in \mathcal{D}_{\mathrm{s}}\}$ is the support of class $k$ and its cardinality is $s$ support shots (this is non-zero and is generally shortened to shots). The number $s$ is small in the few-shot setting. The set $\{x_i \mid y_i = k, (x_i,y_i) \in \mathcal{D}_{\mathrm{q}}\}$ is the query of class $k$ and its cardinality is $q$ query shots. The goal is to learn a function $F$ to exploit the training set $\mathcal{D}_{\mathrm{s}}$ to predict the label of a test datum $x$ ,
31
+
32
+ where $(x,y)\in \mathcal{D}_{\mathbf{q}}$ ,by
33
+
34
+ $$
35
+ \hat {y} = F (x; \mathcal {D} _ {\mathrm {s}}). \tag {1}
36
+ $$
37
+
38
+ Typical approaches for supervised learning replace $\mathcal{D}_{\mathrm{s}}$ above with a statistic, $\theta^{*} = \theta^{*}(\mathcal{D}_{\mathrm{s}})$ that is, ideally, sufficient to classify $\mathcal{D}_{\mathrm{s}}$ , as measured by, say, the cross-entropy loss
39
+
40
+ $$
41
+ \theta^ {*} \left(\mathcal {D} _ {\mathrm {s}}\right) = \arg \min _ {\theta} \frac {1}{N _ {\mathrm {s}}} \sum_ {(x, y) \in \mathcal {D} _ {\mathrm {s}}} - \log p _ {\theta} (y | x), \tag {2}
42
+ $$
43
+
44
+ where $p_{\theta}(\cdot |x)$ is the probability distribution on $C_t$ as predicted by the model in response to input $x$ . When presented with a test datum, the classification rule is typically chosen to be of the form
45
+
46
+ $$
47
+ F _ {\theta *} (x; \mathcal {D} _ {\mathrm {s}}) \triangleq \underset {k} {\arg \max } p _ {\theta *} (k | x), \tag {3}
48
+ $$
49
+
50
+ where $\mathcal{D}_{\mathrm{s}}$ is represented by $\theta^{*}$ . This form of the classifier entails a loss of generality unless $\theta^{*}$ is a sufficient statistic, $p_{\theta^{*}}(y|x) = p(y|x)$ , which is of course never the case, especially given few labeled data in $\mathcal{D}_{\mathrm{s}}$ . However, it conveniently separates training and inference phases, never having to revisit the training set. This might be desirable in ordinary image classification, but not in few-shot learning. We therefore adopt the more general form of $F$ in (1).
51
+
52
+ If we call the test datum $x = x_{N_{\mathrm{s}} + 1}$ , then we can obtain the general form of the classifier by
53
+
54
+ $$
55
+ \hat {y} = F (x; \mathcal {D} _ {\mathrm {s}}) = \underset {y _ {N _ {\mathrm {s}} + 1}} {\arg \min } \underset {\theta} {\min } \frac {1}{N _ {\mathrm {s}} + 1} \sum_ {i = 1} ^ {N _ {\mathrm {s}} + 1} - \log p _ {\theta} \left(y _ {i} \mid x _ {i}\right). \tag {4}
56
+ $$
57
+
58
+ In addition to the training set, one typically also has a meta-training set, $\mathcal{D}_{\mathrm{m}} = \{(x_i,y_i)\}_{i = 1}^{N_{\mathrm{m}}}$ where $y_{i}\in C_{\mathrm{m}}$ with set of classes $C_\mathrm{m}$ disjoint from $C_t$ . The goal of meta-training is to use $\mathcal{D}_{\mathrm{m}}$ to infer the parameters of the few-shot learning model: $\hat{\theta} (\mathcal{D}_{\mathrm{m}};\left(\mathcal{D}_{\mathrm{s}},\mathcal{D}_{\mathrm{q}}\right)) = \arg \min_{\theta}\frac{1}{N_{\mathrm{m}}}\sum_{(x,y)\in \mathcal{D}_{\mathrm{m}}}\ell (y,F_{\theta}(x;\left(\mathcal{D}_{\mathrm{s}},\mathcal{D}_{\mathrm{q}}\right))),$ where meta-training loss $\ell$ depends on the method.
59
+
60
+ # 2.1 RELATED WORK
61
+
62
+ Learning to learn: The meta-training loss is designed to make few-shot training efficient (Utgoff, 1986; Schmidhuber, 1987; Baxter, 1995; Thrun, 1998). This approach partitions the problem into a base-level that performs standard supervised learning and a meta-level that accrues information from the base-level. Two main approaches have emerged to do so.
63
+
64
+ Gradient-based approaches: These approaches treat the updates of the base-level as a learnable mapping (Bengio et al., 1992). This mapping can be learnt using temporal models (Hochreiter et al., 2001; Ravi & Larochelle, 2016), or one can back-propagate the gradients across the base-level updates (Maclaurin et al., 2015; Finn et al., 2017). It is challenging to perform this dual or bi-level optimization, respectively. These approaches have not been shown to be competitive on large datasets. Recent approaches learn the base-level in closed-form using SVMs (Bertinetto et al., 2018; Lee et al., 2019) which restricts the capacity of the base-level although it alleviates the optimization problem.
65
+
66
+ Metric-based approaches: A majority of the state-of-the-art algorithms are metric-based approaches. These approaches learn an embedding that can be used to compare (Bromley et al., 1994; Chopra et al., 2005) or cluster (Vinyals et al., 2016; Snell et al., 2017) query samples. Recent approaches build upon this idea with increasing levels of sophistication in learning the embedding (Vinyals et al., 2016; Gidaris & Komodakis, 2018; Oreshkin et al., 2018), creating exemplars from the support set and picking a metric for the embedding (Gidaris & Komodakis, 2018; Allen et al., 2018; Ravichandran et al., 2019). There are numerous hyper-parameters involved in implementing these approaches which makes it hard to evaluate them systematically (Chen et al., 2018).
67
+
68
+ Transductive learning: This approach is more efficient at using few labeled data than supervised learning (Joachims, 1999; Zhou et al., 2004; Vapnik, 2013). The idea is to use information from the test datum $x$ to restrict the hypothesis space while searching for the classifier $F(x, \mathcal{D}_{\mathrm{s}})$ at test time. Our approach is closest to this line of work. We train a model on the meta-training set $\mathcal{D}_{\mathrm{m}}$ and
69
+
70
+ initialize a classifier using the support set $\mathcal{D}_{\mathrm{s}}$ . The parameters are then fine-tuned to adapt to the new test datum $x$ .
71
+
72
+ There are recent papers in few-shot learning such as Nichol et al. (2018); Liu et al. (2018a) that are motivated from transductive learning and exploit the unlabeled query samples. The former updates batch-normalization parameters using query samples while the latter uses label propagation to estimate labels of all query samples at once.
73
+
74
+ Semi-supervised learning: We penalize the Shannon Entropy of the predictions on the query samples at test time. This is a simple technique in the semi-supervised learning literature, closest to Grandvalet & Bengio (2005). Modern augmentation techniques such as Miyato et al. (2015); Sajjadi et al. (2016); Dai et al. (2017) or graph-based approaches (Kipf & Welling, 2016) can also be used with our approach; we used the entropic penalty for the sake of simplicity.
75
+
76
+ Semi-supervised few-shot learning is typically formulated as having access to extra unlabeled data during meta-training or few-shot training (Garcia & Bruna, 2017; Ren et al., 2018). This is different from our approach which uses the unlabeled query samples for transductive learning.
77
+
78
+ Initialization for fine-tuning: We use recent ideas from the deep metric learning literature (Hu et al., 2015; Movshovitz-Attias et al., 2017; Qi et al., 2018; Chen et al., 2018; Gidaris & Komodakis, 2018) to initialize the meta-trained model for fine-tuning. These works connect the softmax cross-entropy loss with cosine distance and are discussed further in Section 3.1.
79
+
80
+ # 3 APPROACH
81
+
82
+ The simplest form of meta-training is pre-training with the cross-entropy loss, which yields
83
+
84
+ $$
85
+ \hat {\theta} = \arg \min _ {\theta} \frac {1}{N _ {\mathrm {m}}} \sum_ {(x, y) \in \mathcal {D} _ {\mathrm {m}}} - \log p _ {\theta} (y | x) + R (\theta), \tag {5}
86
+ $$
87
+
88
+ where the second term denotes a regularizer, say weight decay $R(\theta) = \| \theta \| ^2 /2$ . The model predicts logits $z_{k}(x;\theta)$ for $k\in C_{\mathrm{m}}$ and the distribution $p_{\theta}(\cdot |x)$ is computed from these logits using the softmax operator. This loss is typically minimized by stochastic gradient descent-based algorithms.
89
+
90
+ If few-shot training is performed according to the general form in (4), then the optimization is identical to that above and amounts to fine-tuning the pre-trained model. However, the model needs to be modified to account for the new classes. Careful initialization can make this process efficient.
91
+
92
+ # 3.1 SUPPORT-BASED INITIALIZATION
93
+
94
+ Given the pre-trained model (called the "backbone"), $p_{\theta}$ (dropping the hat from $\hat{\theta}$ ), we append a new fully-connected "classifier" layer that takes the logits of the backbone as input and predicts the labels in $C_t$ . For a support sample $(x, y)$ , denote the logits of the backbone by $z(x; \theta) \in \mathbb{R}^{|C_m|}$ ; the weights and biases of the classifier by $w \in \mathbb{R}^{|C_t| \times |C_m|}$ and $b \in \mathbb{R}^{|C_t|}$ respectively; and the $k^{\text{th}}$ row of $w$ and $b$ by $w_k$ and $b_k$ respectively. The ReLU non-linearity is denoted by $(\cdot)_+$ .
95
+
96
+ If the classifier's logits are $z' = wz(x; \theta)_+ + b$ , the first term in the cross-entropy loss: $-\log p_{\Theta}(y|x) = -w_y z(x; \theta)_+ - b_y + \log \sum_k e^{w_k z(x; \theta)_+ + b_k}$ would be the cosine distance between $w_y$ and $z(x; \theta)_+$ if both were normalized to unit $\ell_2$ norm and bias $b_y = 0$ . This suggests
97
+
98
+ $$
99
+ w _ {y} = \frac {z (x ; \theta) _ {+}}{\| z (x ; \theta) _ {+} \|} \quad \text {a n d} \quad b _ {y} = 0 \tag {6}
100
+ $$
101
+
102
+ as a candidate for initializing the classifier, along with normalizing $z(x; \theta)_+$ to unit $\ell_2$ norm. It is easy to see that this maximizes the cosine similarity between features $z(x; \theta)_+$ and weights $w_y$ . For multiple support samples per class, we take the Euclidean average of features $z(x; \theta)_+$ for each class in $C_t$ , before $\ell_2$ normalization in (6). The logits of the classifier are thus given by
103
+
104
+ $$
105
+ \mathbb {R} ^ {\left| C _ {\mathrm {t}} \right|} \ni z (x; \Theta) = w \frac {z (x ; \theta) _ {+}}{\| z (x ; \theta) _ {+} \|} + b, \tag {7}
106
+ $$
107
+
108
+ where $\Theta = \{\theta, w, b\}$ , the combined parameters of the backbone and the classifier. Note that we have added a ReLU non-linearity between the backbone and the classifier, before the $\ell_2$ normalization. All the parameters $\Theta$ are trainable in the fine-tuning phase.
109
+
110
+ Remark 1 (Relation to weight imprinting). The support-based initialization is motivated from previous papers (Hu et al., 2015; Movshovitz-Attias et al., 2017; Chen et al., 2018; Gidaris & Komodakis, 2018). In particular, Qi et al. (2018) use a similar technique, with minor differences, to expand the size of the final fully-connected layer (classifier) for low-shot continual learning. The authors call their technique "weight imprinting" because $w_{k}$ can be thought of as a template for class $k$ . In our case, we are only interested in performing well on the few-shot classes.
111
+
112
+ Remark 2 (Using logits of the backbone instead of features as input to the classifier). A natural way to adapt the backbone to predict new classes is to re-initialize its final fully-connected layer (classifier). We instead append a new classifier after the logits of the backbone. This is motivated from Frosst et al. (2019) who show that for a trained backbone, outputs of all layers are entangled, without class-specific clusters; but the logits are peaked on the correct class, and are therefore well-clustered. The logits are thus better inputs to the classifier as compared to the features. We explore this choice via an experiment in Appendix C.6.
113
+
114
+ # 3.2 TRANSDUCTIVE FINE-TUNING
115
+
116
+ In (4), we assumed that there is a single query sample. However, we can also process multiple query samples together, and perform the minimization over all unknown query labels. We introduce a regularizer, similar to Grandvalet & Bengio (2005), as we seek outputs with a peaked posterior, or low Shannon Entropy $\mathbb{H}$ . So the transductive fine-tuning phase solves for
117
+
118
+ $$
119
+ \Theta^ {*} = \arg \min _ {\Theta} \frac {1}{N _ {\mathrm {s}}} \sum_ {(x, y) \in \mathcal {D} _ {\mathrm {s}}} - \log p _ {\Theta} (y \mid x) + \frac {1}{N _ {\mathrm {q}}} \sum_ {(x, y) \in \mathcal {D} _ {\mathrm {q}}} \mathbb {H} \left(p _ {\Theta} (\cdot \mid x)\right). \tag {8}
120
+ $$
121
+
122
+ Note that the data fitting term uses the labeled support samples whereas the regularizer uses the unlabeled query samples. The two terms can be highly imbalanced (due to the varying range of values for the two quantities, or due to the variance in their estimates which depend on $N_{\mathrm{s}}$ and $N_{\mathrm{q}}$ ). To allow finer control on this imbalance, one can use a coefficient for the entropic term and/or a temperature in the softmax distribution of the query samples. Tuning these hyper-parameters per dataset and few-shot protocol leads to uniform improvements in the results in Section 4 by $1 - 2\%$ . However, we wish to keep in line with our goal of developing a simple baseline and refrain from optimizing these hyper-parameters, and set them equal to 1 for all experiments on benchmark datasets.
123
+
124
+ # 4 EXPERIMENTAL RESULTS
125
+
126
+ We show results of transductive fine-tuning on benchmark datasets in few-shot learning, namely Mini-ImageNet (Vinyals et al., 2016), Tiered-ImageNet (Ren et al., 2018), CIFAR-FS (Bertinetto et al., 2018) and FC-100 (Oreshkin et al., 2018), in Section 4.1. We also show large-scale experiments on the ImageNet-21k dataset (Deng et al., 2009) in Section 4.2. Along with the analysis in Section 4.3, these help us design a metric that measures the hardness of an episode in Section 4.4. We sketch key points of the experimental setup here; see Appendix A for details.
127
+
128
+ Pre-training: We use the WRN-28-10 (Zagoruyko & Komodakis, 2016) model as the backbone. We pre-train using standard data augmentation, cross-entropy loss with label smoothing (Szegedy et al., 2016) of $\epsilon = 0.1$ , mixup regularization (Zhang et al., 2017) of $\alpha = 0.25$ , SGD with batch-size of 256, Nesterov's momentum of 0.9, weight-decay of $10^{-4}$ and no dropout. We use batch-normalization (Ioffe & Szegedy, 2015) but exclude its parameters from weight decay (Jia et al., 2018). We use cyclic learning rates (Smith, 2017) and half-precision distributed training on 8 GPUs (Howard et al., 2018) to reduce training time.
129
+
130
+ Each dataset has a training, validation and test set consisting of disjoint sets of classes. Some algorithms use only the training set as the meta-training set (Snell et al., 2017; Oreshkin et al., 2018), while others use both training and validation sets (Rusu et al., 2018). For completeness we report
131
+
132
+ results using both methodologies; the former is denoted as (train) while the latter is denoted as (train + val). All experiments in Sections 4.3 and 4.4 use the (train + val) setting.
133
+
134
+ Fine-tuning: We perform fine-tuning on one GPU in full-precision for 25 epochs and a fixed learning rate of $5 \times 10^{-5}$ with Adam (Kingma & Ba, 2014) without any regularization. We make two weight updates in each epoch: one for the cross-entropy term using support samples and one for the Shannon Entropy term using query samples (cf. (8)).
135
+
136
+ Hyper-parameters: We used images from ImageNet-1k belonging to the training classes of Mini-ImageNet as the validation set for pre-training the backbone for Mini-ImageNet. We used the validation set of Mini-ImageNet to choose hyper-parameters for fine-tuning. All hyper-parameters are kept constant for experiments on benchmark datasets.
137
+
138
+ Evaluation: Few-shot episodes contain classes sampled uniformly from classes in the test sets of the respective datasets; support and query samples are further sampled uniformly for each class; the query shot is fixed to 15 for all experiments unless noted otherwise. All networks are evaluated over 1,000 few-shot episodes unless noted otherwise. To enable easy comparison with existing literature, we report an estimate of the mean accuracy and the $95\%$ confidence interval of this estimate. However, we encourage reporting the standard deviation in light of Section 1 and Fig. 1.
139
+
140
+ # 4.1 RESULTS ON BENCHMARK DATASETS
141
+
142
+ Table 1: Few-shot accuracies on benchmark datasets for 5-way few-shot episodes. The notation conv $(64^{k})_{\times 4}$ denotes a CNN with 4 layers and $64^{k}$ channels in the $k^{\mathrm{th}}$ layer. Best results in each column are shown in bold. Results where the support-based initialization is better than or comparable to existing algorithms are denoted by $\dagger$ . The notation (train + val) indicates that the backbone was pre-trained on both training and validation sets of the datasets; the backbone is trained only on the training set otherwise. (Lee et al., 2019) uses a $1.25\times$ wider ResNet-12 which we denote as ResNet-12*.
143
+
144
+ <table><tr><td rowspan="2">Algorithm</td><td rowspan="2">Architecture</td><td colspan="3">Mini-ImageNet</td><td colspan="3">Tiered-ImageNet</td><td colspan="3">CIFAR-FS</td><td colspan="2">FC-100</td></tr><tr><td>1-shot (%)</td><td>5-shot (%)</td><td></td><td>1-shot (%)</td><td>5-shot (%)</td><td></td><td>1-shot (%)</td><td>5-shot (%)</td><td></td><td>1-shot (%)</td><td>5-shot (%)</td></tr><tr><td>Matching networks (Vinyals et al., 2016)</td><td>conv (64)×4</td><td>46.6</td><td>60</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LSTM meta-learner (Ravi &amp; Larochelle, 2016)</td><td>conv (64)×4</td><td>43.44 ± 0.77</td><td>60.60 ± 0.71</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Prototypical Networks (Snell et al., 2017)</td><td>conv (64)×4</td><td>49.42 ± 0.78</td><td>68.20 ± 0.66</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MAML (Finn et al., 2017)</td><td>conv (32)×4</td><td>48.70 ± 1.84</td><td>63.11 ± 0.92</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>R2D2 (Bertinetto et al., 2018)</td><td>conv (96k)×4</td><td>51.8 ± 0.2</td><td>68.4 ± 0.2</td><td></td><td></td><td></td><td></td><td>65.4 ± 0.2</td><td>79.4 ± 0.2</td><td></td><td></td><td></td></tr><tr><td>TADAM (Oreshkin et al., 2018)</td><td>ResNet-12</td><td>58.5 ± 0.3</td><td>76.7 ± 0.3</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>40.1 ± 0.4</td><td>56.1 ± 0.4</td></tr><tr><td>Transductive Propagation (Liu et al., 2018b)</td><td>conv (64)×4</td><td>55.51 ± 0.86</td><td>69.86 ± 0.65</td><td></td><td>59.91 ± 0.94</td><td>73.30 ± 0.75</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Transductive Propagation (Liu et al., 
2018b)</td><td>ResNet-12</td><td>59.46</td><td>75.64</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MetaOpt SVM (Lee et al., 2019)</td><td>ResNet-12 *</td><td>62.64 ± 0.61</td><td>78.63 ± 0.46</td><td></td><td>65.99 ± 0.72</td><td>81.56 ± 0.53</td><td></td><td>72.0 ± 0.7</td><td>84.2 ± 0.5</td><td></td><td>41.1 ± 0.6</td><td>55.5 ± 0.6</td></tr><tr><td>Support-based initialization (train)</td><td>WRN-28-10</td><td>56.17 ± 0.64</td><td>73.31 ± 0.53</td><td></td><td>67.45 ± 0.70†</td><td>82.88 ± 0.53†</td><td></td><td>70.26 ± 0.70</td><td>83.82 ± 0.49†</td><td></td><td>36.82 ± 0.51</td><td>49.72 ± 0.55</td></tr><tr><td>Fine-tuning (train)</td><td>WRN-28-10</td><td>57.73 ± 0.62</td><td>78.17 ± 0.49</td><td></td><td>66.58 ± 0.70</td><td>85.55 ± 0.48</td><td></td><td>68.72 ± 0.67</td><td>86.11 ± 0.47</td><td></td><td>38.25 ± 0.52</td><td>57.19 ± 0.57</td></tr><tr><td>Transductive fine-tuning (train)</td><td>WRN-28-10</td><td>65.73 ± 0.68</td><td>78.40 ± 0.52</td><td></td><td>73.34 ± 0.71</td><td>85.50 ± 0.50</td><td></td><td>76.58 ± 0.68</td><td>85.79 ± 0.50</td><td></td><td>43.16 ± 0.59</td><td>57.57 ± 0.55</td></tr><tr><td>Activation to Parameter (Qiao et al., 2018) (train + val)</td><td>WRN-28-10</td><td>59.60 ± 0.41</td><td>73.74 ± 0.19</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LEO (Rusu et al., 2018) (train + val)</td><td>WRN-28-10</td><td>61.76 ± 0.08</td><td>77.59 ± 0.12</td><td></td><td>66.33 ± 0.05</td><td>81.44 ± 0.09</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>MetaOpt SVM (Lee et al., 2019) (train + val)</td><td>ResNet-12 *</td><td>64.09 ± 0.62</td><td>80.00 ± 0.45</td><td></td><td>65.81 ± 0.74</td><td>81.75 ± 0.53</td><td></td><td>72.8 ± 0.7</td><td>85.0 ± 0.5</td><td></td><td>47.2 ± 0.6</td><td>62.5 ± 0.6</td></tr><tr><td>Support-based initialization (train + val)</td><td>WRN-28-10</td><td>58.47 ± 0.66</td><td>75.56 ± 
0.52</td><td></td><td>67.34 ± 0.69†</td><td>83.32 ± 0.51†</td><td></td><td>72.14 ± 0.69†</td><td>85.21 ± 0.49†</td><td></td><td>45.08 ± 0.61</td><td>60.05 ± 0.60</td></tr><tr><td>Fine-tuning (train + val)</td><td>WRN-28-10</td><td>59.62 ± 0.66</td><td>79.93 ± 0.47</td><td></td><td>66.23 ± 0.68</td><td>86.08 ± 0.47</td><td></td><td>70.07 ± 0.67</td><td>87.26 ± 0.45</td><td></td><td>43.80 ± 0.58</td><td>64.40 ± 0.58</td></tr><tr><td>Transductive fine-tuning (train + val)</td><td>WRN-28-10</td><td>68.11 ± 0.69</td><td>80.36 ± 0.50</td><td></td><td>72.87 ± 0.71</td><td>86.15 ± 0.50</td><td></td><td>78.36 ± 0.70</td><td>87.54 ± 0.49</td><td></td><td>50.44 ± 0.68</td><td>65.74 ± 0.60</td></tr></table>
145
+
146
+ Table 1 shows the results of transductive fine-tuning on benchmark datasets for standard few-shot protocols. We see that this simple baseline is uniformly better than state-of-the-art algorithms. We include results for support-based initialization, which does no fine-tuning; and for fine-tuning, which involves optimizing only the cross-entropy term in (8) using the labeled support samples.
147
+
148
+ The support-based initialization is sometimes better than or comparable to state-of-the-art algorithms (marked $\dagger$ ). The few-shot literature has gravitated towards larger backbones (Rusu et al., 2018). Our results indicate that for large backbones even standard cross-entropy pre-training and support-based initialization work well, similar to the observation made by Chen et al. (2018).
149
+
150
+ For the 1-shot 5-way setting, fine-tuning using only the labeled support examples leads to minor improvement over the initialization, and sometimes marginal degradation. However, for the 5-shot 5-way setting non-transductive fine-tuning is better than the state-of-the-art.
151
+
152
+ In both (train) and (train + val) settings, transductive fine-tuning leads to $2 - 7\%$ improvement for the 1-shot 5-way setting over the state-of-the-art for all datasets. It results in an increase of $1.5 - 4\%$ for the 5-shot 5-way setting except for the Mini-ImageNet dataset, where the performance is matched. This suggests that the use of the unlabeled query samples is vital for the few-shot setting.
153
+
154
+ For the Mini-ImageNet, CIFAR-FS and FC-100 datasets, using additional data from the validation set to pre-train the backbone results in $2 - 8\%$ improvements; the improvement is smaller for Tiered-ImageNet. This suggests that having more pre-training classes leads to improved few-shot performance as a consequence of a better embedding. See Appendix C.5 for more experiments.
155
+
156
+ # 4.2 LARGE-SCALE FEW-SHOT LEARNING
157
+
158
+ The ImageNet-21k dataset (Deng et al., 2009) with 14.2M images across 21,814 classes is an ideal large-scale few-shot learning benchmark due to the high class imbalance. The simplicity of our approach allows us to present the first few-shot learning results on this large dataset. We use the 7,491 classes having more than 1,000 images each as the meta-training set and the next 13,007 classes with at least 10 images each for constructing few-shot episodes. See Appendix B for details.
159
+
160
+ Table 2: Accuracy (%) on the few-shot data of ImageNet-21k. The confidence intervals are large because we compute statistics only over 80 few-shot episodes so as to test for large number of ways.
161
+
162
+ <table><tr><td>Algorithm</td><td>Model</td><td>Shot</td><td>5</td><td>10</td><td>20</td><td>40</td><td>80</td><td>160</td></tr><tr><td>Support-based initialization</td><td>WRN-28-10</td><td>1</td><td>87.20 ± 1.72</td><td>78.71 ± 1.63</td><td>69.48 ± 1.30</td><td>60.55 ± 1.03</td><td>49.15 ± 0.68</td><td>40.57 ± 0.42</td></tr><tr><td>Transductive fine-tuning</td><td>WRN-28-10</td><td>1</td><td>89.00 ± 1.86</td><td>79.88 ± 1.70</td><td>69.66 ± 1.30</td><td>60.72 ± 1.04</td><td>48.88 ± 0.66</td><td>40.46 ± 0.44</td></tr><tr><td>Support-based initialization</td><td>WRN-28-10</td><td>5</td><td>95.73 ± 0.84</td><td>91.00 ± 1.09</td><td>84.77 ± 1.04</td><td>78.10 ± 0.79</td><td>70.09 ± 0.71</td><td>61.93 ± 0.45</td></tr><tr><td>Transductive fine-tuning</td><td>WRN-28-10</td><td>5</td><td>95.20 ± 0.94</td><td>90.61 ± 1.03</td><td>84.21 ± 1.09</td><td>77.13 ± 0.82</td><td>68.94 ± 0.75</td><td>60.11 ± 0.48</td></tr></table>
163
+
164
+ Table 2 shows the mean accuracy of transductive fine-tuning evaluated over 80 few-shot episodes on ImageNet-21k. The accuracy is extremely high as compared to corresponding results in Table 1 even for large way. E.g., the 1-shot 5-way accuracy on Tiered-ImageNet is $72.87 \pm 0.71\%$ while it is $89 \pm 1.86\%$ here. This corroborates the results in Section 4.1 and indicates that pre-training with a large number of classes may be an effective strategy to build large-scale few-shot learning systems.
165
+
166
+ The improvements of transductive fine-tuning are minor for ImageNet-21k because the support-based initialization accuracies are extremely high. We noticed a slight degradation of accuracies due to transductive fine-tuning at high ways because the entropic term in (8) is much larger than the cross-entropy loss. The experiments for ImageNet-21k therefore scale down the entropic term by $\log |C_{\mathrm{t}}|$ and forego the ReLU in (6) and (7). This reduces the difference in accuracies at high ways.
167
+
168
+ # 4.3 ANALYSIS
169
+
170
+ This section presents a comprehensive analysis of transductive fine-tuning on the Mini-ImageNet, Tiered-ImageNet and ImageNet-21k datasets.
171
+
172
+ Robustness of transductive fine-tuning to query shot: Fig. 2a shows the effect of changing the query shot on the mean accuracy. For the 1-shot 5-way setting, the entropic penalty in (8) helps as the query shot increases. This effect is minor in the 5-shot 5-way setting as more labeled data is available. Query shot of 1 achieves a relatively high mean accuracy because transductive fine-tuning can adapt to those few queries. One query shot is enough to benefit from transductive fine-tuning: for Mini-ImageNet, the 1-shot 5-way accuracy with query shot of 1 is $66.94 \pm 1.55\%$ which is better than non-transductive fine-tuning $(59.62 \pm 0.66\%)$ in Table 1) and higher than other approaches.
173
+
174
+ Performance for different way and support shot: A few-shot system should be able to robustly handle different few-shot scenarios. Figs. 2b and 2c, show the performance of transductive fine-tuning
175
+
176
+ ![](images/6ddc6a632666a3a9f63f360a48785667d5022e7d38016dc2b96b0276b54af88e.jpg)
177
+ (a)
178
+
179
+ ![](images/13a8adc3a7265ad8f5fecc31df68d26e756b367777df2057dcbaa2d28fcdec89.jpg)
180
+ (b)
181
+
182
+ ![](images/2edb17fa6ac3c83873904c803eb006fd65fe3d766cfb688c8f6f0d310c455634.jpg)
183
+ (c)
184
+ Figure 2: Mean accuracy of transductive fine-tuning for different query shot, way and support shot. Fig. 2a shows that the mean accuracy improves with query shot if the support shot is low; this effect is minor for Tiered-ImageNet. The mean accuracy for query shot of 1 is high because transductive fine-tuning can specialize to those queries. Fig. 2b shows that the mean accuracy degrades logarithmically with way for fixed support shot and query shot (15). Fig. 2c suggests that the mean accuracy improves logarithmically with the support shot for fixed way and query shot (15). These trends suggest thumb rules for building few-shot systems.
185
+
186
+ with changing way and support shot. The mean accuracy changes logarithmically with the way and support shot which provides thumb rules for building few-shot systems.
187
+
188
+ Different backbone architectures: We include experiments using conv $(64)_{\times 4}$ (Vinyals et al., 2016) and ResNet-12 (He et al., 2016a; Oreshkin et al., 2018) in Table 3, in order to facilitate comparisons for different backbone architectures. The results for transductive fine-tuning are comparable or better than state-of-the-art for a given backbone architecture, except for those in Liu et al. (2018b) who use a more sophisticated transductive algorithm using graph propagation, with conv $(64)_{\times 4}$ . In line with our goal for simplicity, we kept the hyper-parameters for pre-training and fine-tuning the same as the ones used for WRN-28-10 (cf. Sections 3 and 4). These results show that transductive fine-tuning is a sound baseline for a variety of backbone architectures.
189
+
190
+ Computational complexity: There is no free lunch and our advocated baseline has its limitations. It performs gradient updates during the fine-tuning phase which makes it slow at inference time. Specifically, transductive fine-tuning is about $300 \times$ slower (20.8 vs. 0.07 seconds) for a 1-shot 5-way episode with 15 query shot as compared to Snell et al. (2017) with the same backbone architecture (prototypical networks (Snell et al., 2017) do not update model parameters at inference time). The latency factor reduces with higher support shot. Interestingly, for a single query shot, the former takes 4 seconds vs. 0.07 seconds. This is a more reasonable factor of $50 \times$ , especially considering that the mean accuracy of the former is $66.2\%$ compared to about $58\%$ of the latter in our implementation. Experiments in Appendix C.3 suggest that using a smaller backbone architecture partially compensates for the latency with some degradation of accuracy. A number of approaches such as Ravi & Larochelle (2016); Finn et al. (2017); Rusu et al. (2018); Lee et al. (2019) also perform additional processing at inference time and are expected to be slow, along with other transductive approaches (Nichol et al., 2018; Liu et al., 2018b). Additionally, support-based initialization has the same inference time as Snell et al. (2017).
191
+
192
+ # 4.4 A PROPOSAL FOR REPORTING FEW-SHOT CLASSIFICATION PERFORMANCE
193
+
194
+ As discussed in Section 1, we need better metrics to report the performance of few-shot algorithms. There are two main issues: (i) standard deviation of the few-shot accuracy across different sampled episodes for a given algorithm, dataset and few-shot protocol is very high (cf. Fig. 1), and (ii) different models and hyper-parameters for different few-shot protocols makes evaluating algorithmic contributions difficult (cf. Table 1). This section takes a step towards resolving these issues.
195
+
196
+ Hardness of an episode: Classification performance on a few-shot episode is determined by the relative location of the features corresponding to labeled and unlabeled samples. If the unlabeled
197
+
198
+ features are close to the labeled features from the same class, the classifier can distinguish between the classes easily to obtain a high accuracy. Otherwise, the accuracy would be low. The following definition characterizes this intuition.
199
+
200
+ For training (support) set $\mathcal{D}_{\mathrm{s}}$ and test (query) set $\mathcal{D}_{\mathrm{q}}$ , we will define the hardness $\Omega_{\varphi}$ as the average log-odds of a test datum being classified incorrectly. More precisely,
201
+
202
+ $$
203
+ \Omega_{\varphi}\left(\mathcal{D}_{\mathrm{q}}; \mathcal{D}_{\mathrm{s}}\right) = \frac{1}{N_{\mathrm{q}}} \sum_{(x, y) \in \mathcal{D}_{\mathrm{q}}} \log \frac{1 - p(y \mid x)}{p(y \mid x)}, \tag{9}
204
+ $$
205
+
206
+ where $p(\cdot \mid x)$ is a softmax distribution with logits $z_{y} = w\varphi (x)$ . $w$ is the weight matrix constructed using (6) and $\mathcal{D}_{\mathrm{s}}$ ; and $\varphi$ is the $\ell_2$ normalized logits computed using a rich-enough feature generator, say a deep network trained for standard image classification. This is a clustering loss where the labeled support samples form class-specific cluster centers. The cluster affinities are calculated using cosine-similarities, followed by the softmax operator to get the probability distribution $p(\cdot \mid x)$ .
207
+
208
+ Note that $\Omega_{\varphi}$ does not depend on the few-shot learner and gives a measure of how difficult the classification problem is for any few-shot episode, using a generic feature extractor.
209
+
210
+ ![](images/a80f4367f5a0e1c3deae8c92e0876a9d2496ca5d19062135209159e6774ed522.jpg)
211
+ Figure 3: Comparing the accuracy of transductive fine-tuning (solid lines) vs. support-based initialization (dotted lines) for different datasets, ways (5, 10, 20, 40, 80 and 160) and support shots (1 and 5). Abscissae are computed using (9) and a Resnet-152 (He et al., 2016b) network trained for standard image classification on the ImageNet-1k dataset. Each marker indicates the accuracy of transductive fine-tuning on a few-shot episode; markers for support-based initialization are hidden to avoid clutter. Shape of the markers denotes different ways; ways increase from left to right (5, 10, 20, 40, 80 and 160). Size of the markers denotes different support shot (1 and 5); it increases from the bottom to the top. E.g., the ellipse contains accuracies of different 5-shot 10-way episodes for ImageNet-21k. Regression lines are drawn for each algorithm and dataset by combining the episodes of all few-shot protocols. This plot is akin to a precision-recall curve and allows comparing two algorithms for different few-shot scenarios. The areas in the first quadrant under the fitted regression lines are 295 vs. 284 (CIFAR-FS), 167 vs. 149 (FC-100), 208 vs. 194 (Mini-ImageNet), 280 vs. 270 (Tiered-ImageNet) and 475 vs. 484 (ImageNet-21k) for transductive fine-tuning and support-based initialization.
212
+
213
+ Fig. 3 demonstrates how to use the hardness metric. Few-shot accuracy degrades linearly with hardness. Performance for all hardness can thus be estimated by testing for two different ways. We advocate selecting hyper-parameters using the area under the fitted curve as a metric instead of tuning them specifically for each few-shot protocol. The advantage of such a test methodology is that it predicts the performance of the model across multiple few-shot protocols systematically.
214
+
215
+ Different algorithms can be compared directly, e.g., transductive fine-tuning (solid lines) and support-based initialization (dotted lines). For instance, the former leads to large improvements on easy episodes, while the performance is similar for hard episodes, especially for Tiered-ImageNet and ImageNet-21k.
216
+
217
+ The high standard deviation of accuracy of few-shot learning algorithms in Fig. 1 can be seen as the spread of the cluster corresponding to each few-shot protocol, e.g., the ellipse in Fig. 3 denotes the 5-shot 10-way protocol for ImageNet-21k. It is the nature of few-shot learning that episodes have varying hardness even if the way and shot are fixed. However, episodes within the ellipse lie on a different line (with a large negative slope) which indicates that given a few-shot protocol, hardness is a good indicator of accuracy.
218
+
219
+ Fig. 3 also shows that due to fewer test classes, CIFAR-FS, FC-100 and Mini-ImageNet have less diversity in the hardness of episodes while Tiered-ImageNet and ImageNet-21k allow sampling of both very hard and very easy diverse episodes. For a given few-shot protocol, the hardness of episodes in the former three is almost the same as that of the latter two datasets. This indicates that CIFAR-FS, FC-100 and Mini-ImageNet may be good benchmarks for applications with few classes.
220
+
221
+ The hardness metric in (9) naturally builds upon existing ideas in deep metric learning (Qi et al., 2018). We propose it as a means to evaluate few-shot learning algorithms uniformly across different few-shot protocols for different datasets; ascertaining its efficacy and comparisons to other metrics will be part of future work.
222
+
223
+ # 5 DISCUSSION
224
+
225
+ Our aim is to provide grounding to the practice of few-shot learning. The current literature is in the spirit of increasingly sophisticated approaches for modest improvements in mean accuracy using an inadequate evaluation methodology. This is why we set out to establish a baseline, namely transductive fine-tuning, and a systematic evaluation methodology, namely the hardness metric. We would like to emphasize that our advocated baseline, namely transductive fine-tuning, is not novel and yet performs better than existing algorithms on all standard benchmarks. This is indeed surprising and indicates that we need to take a step back and re-evaluate the status quo in few-shot learning. We hope to use the results in this paper as guidelines for the development of new algorithms.
226
+
227
+ # REFERENCES
228
+
229
+ Kelsey R Allen, Hanul Shin, Evan Shelhamer, and Josh B Tenenbaum. Variadic learning by bayesian nonparametric deep embedding. 2018.
230
+ Jonathan Baxter. Learning internal representations. Flinders University of S. Aust., 1995.
231
+ Samy Bengio, Yoshua Bengio, Jocelyn Cloutier, and Jan Gecsei. On the optimization of a synaptic learning rule. In Preprints Conf. Optimality in Artificial and Biological Neural Networks, pp. 6-8. Univ. of Texas, 1992.
232
+ Luca Bertinetto, João F Henriques, Philip HS Torr, and Andrea Vedaldi. Meta-learning with differentiable closed-form solvers. arXiv:1805.08136, 2018.
233
+ Jane Bromley, Isabelle Guyon, Yann LeCun, Eduard Säckinger, and Roopak Shah. Signature verification using a "siamese" time delay neural network. In Advances in neural information processing systems, pp. 737-744, 1994.
234
+ Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank Wang, and Jia-Bin Huang. A closer look at few-shot classification. 2018.
235
+ Sumit Chopra, Raia Hadsell, Yann LeCun, et al. Learning a similarity metric discriminatively, with application to face verification. In CVPR (1), pp. 539-546, 2005.
236
+
237
+ Zihang Dai, Zhilin Yang, Fan Yang, William W Cohen, and Ruslan R Salakhutdinov. Good semi-supervised learning that requires a bad gan. In Advances in neural information processing systems, pp. 6510-6520, 2017.
238
+ Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pp. 248-255. IEEE, 2009.
239
+ Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 1126-1135. JMLR.org, 2017.
240
+ Nicholas Frosst, Nicolas Papernot, and Geoffrey Hinton. Analyzing and improving representations with the soft nearest neighbor loss. arXiv:1902.01889, 2019.
241
+ Victor Garcia and Joan Bruna. Few-shot learning with graph neural networks. arXiv:1711.04043, 2017.
242
+ Spyros Gidaris and Nikos Komodakis. Dynamic few-shot visual learning without forgetting. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4367-4375, 2018.
243
+ Yves Grandvalet and Yoshua Bengio. Semi-supervised learning by entropy minimization. In Advances in neural information processing systems, pp. 529-536, 2005.
244
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2016a.
245
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Identity mappings in deep residual networks. arXiv:1603.05027, 2016b.
246
+ Sepp Hochreiter, A Steven Younger, and Peter R Conwell. Learning to learn using gradient descent. In International Conference on Artificial Neural Networks, pp. 87-94. Springer, 2001.
247
+ Jeremy Howard et al. fastai. https://github.com/fastai/fastai, 2018.
248
+ Junlin Hu, Jiwen Lu, and Yap-Peng Tan. Deep transfer metric learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 325-333, 2015.
249
+ Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv:1502.03167, 2015.
250
+ Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuanzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training ImageNet in four minutes. arXiv:1807.11205, 2018.
251
+ Thorsten Joachims. Transductive inference for text classification using support vector machines. In Icml, volume 99, pp. 200-209, 1999.
252
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv:1412.6980, 2014.
253
+ Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv:1609.02907, 2016.
254
+ Alex Krizhevsky and Geoffrey Hinton. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.
255
+ Kwonjoon Lee, Subhransu Maji, Avinash Ravichandran, and Stefano Soatto. Meta-learning with differentiable convex optimization. arXiv:1904.03758, 2019.
256
+ Yanbin Liu, Juho Lee, Minseop Park, Saehoon Kim, Eunho Yang, Sung Ju Hwang, and Yi Yang. Learning to propagate labels: Transductive propagation network for few-shot learning. 2018a.
257
+ Yanbin Liu, Juho Lee, Minseop Park, Saehoon Kim, and Yi Yang. Transductive propagation network for few-shot learning. arXiv:1805.10002, 2018b.
258
+ Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv:1608.03983, 2016.
259
+ Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-SNE. Journal of machine learning research, 9(Nov):2579-2605, 2008.
260
+ Dougal Maclaurin, David Duvenaud, and Ryan Adams. Gradient-based hyperparameter optimization through reversible learning. In International Conference on Machine Learning, pp. 2113-2122, 2015.
261
+ Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, et al. Mixed precision training. arXiv:1710.03740, 2017.
262
+
263
+ Takeru Miyato, Shin-ichi Maeda, Masanori Koyama, Ken Nakae, and Shin Ishii. Distributional smoothing with virtual adversarial training. arXiv:1507.00677, 2015.
264
+ Yair Movshovitz-Attias, Alexander Toshev, Thomas K Leung, Sergey Ioffe, and Saurabh Singh. No fuss distance metric learning using proxies. In Proceedings of the IEEE International Conference on Computer Vision, pp. 360-368, 2017.
265
+ Alex Nichol, Joshua Achiam, and John Schulman. On first-order meta-learning algorithms. arXiv:1803.02999, 2018.
266
+ Boris Oreshkin, Pau Rodríguez López, and Alexandre Lacoste. Tadam: Task dependent adaptive metric for improved few-shot learning. In Advances in Neural Information Processing Systems, pp. 719-729, 2018.
267
+ Hang Qi, Matthew Brown, and David G Lowe. Low-shot learning with imprinted weights. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5822-5830, 2018.
268
+ Siyuan Qiao, Chenxi Liu, Wei Shen, and Alan L Yuille. Few-shot image recognition by predicting parameters from activations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7229-7238, 2018.
269
+ Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. 2016.
270
+ Avinash Ravichandran, Rahul Bhotika, and Stefano Soatto. Few-shot learning with embedded class models and shot-free meta training, 2019.
271
+ Mengye Ren, Eleni Triantafillou, Sachin Ravi, Jake Snell, Kevin Swersky, Joshua B Tenenbaum, Hugo Larochelle, and Richard S Zemel. Meta-learning for semi-supervised few-shot classification. arXiv:1803.00676, 2018.
272
+ Andrei A Rusu, Dushyant Rao, Jakub Sygnowski, Oriol Vinyals, Razvan Pascanu, Simon Osindero, and Raia Hadsell. Meta-learning with latent embedding optimization. arXiv:1807.05960, 2018.
273
+ Mehdi Sajjadi, Mehran Javanmardi, and Tolga Tasdizen. Regularization with stochastic transformations and perturbations for deep semi-supervised learning. In Advances in Neural Information Processing Systems, pp. 1163-1171, 2016.
274
+ Jürgen Schmidhuber. Evolutionary principles in self-referential learning. (On learning how to learn: The meta-meta-... hook.) Diploma thesis, Institut f. Informatik, Tech. Univ. Munich, 1987.
275
+ Leslie N Smith. Cyclic learning rates for training neural networks. In 2017 IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 464-472. IEEE, 2017.
276
+ Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. In Advances in Neural Information Processing Systems, pp. 4077-4087, 2017.
277
+ Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jon Shlens, and Zbigniew Wojna. Rethinking the inception architecture for computer vision. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 2818-2826, 2016.
278
+ Sebastian Thrun. Lifelong learning algorithms. In Learning to learn, pp. 181-209. Springer, 1998.
279
+ Eleni Triantafillou, Tyler Zhu, Vincent Dumoulin, Pascal Lamblin, Kelvin Xu, Ross Goroshin, Carles Gelada, Kevin Swersky, Pierre-Antoine Manzagol, and Hugo Larochelle. Meta-dataset: A dataset of datasets for learning to learn from few examples. arXiv preprint arXiv:1903.03096, 2019.
280
+ Paul E Utgoff. Shift of bias for inductive concept learning. Machine learning: An artificial intelligence approach, 2:107-148, 1986.
281
+ Vladimir Vapnik. The nature of statistical learning theory. Springer science & business media, 2013.
282
+ Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. In Advances in neural information processing systems, pp. 3630-3638, 2016.
283
+ Junyuan Xie, Tong He, Zhi Zhang, Hang Zhang, Zhongyue Zhang, and Mu Li. Bag of tricks for image classification with convolutional neural networks. arXiv:1812.01187, 2018.
284
+ Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. arXiv:1605.07146, 2016.
285
+ Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv:1710.09412, 2017.
286
+ Dengyong Zhou, Olivier Bousquet, Thomas N Lal, Jason Weston, and Bernhard Scholkopf. Learning with local and global consistency. In Advances in neural information processing systems, pp. 321-328, 2004.
287
+
288
+ # A SETUP
289
+
290
+ # A.1 DATASETS
291
+
292
+ We use the following datasets for our benchmarking experiments.
293
+
294
+ - The Mini-ImageNet dataset (Vinyals et al., 2016) which is a subset of ImageNet-1k (Deng et al., 2009) and consists of $84 \times 84$ sized images with 600 images per class. There are 64 training, 16 validation and 20 test classes. There are multiple versions of this dataset in the literature; we obtained the dataset from the authors of Gidaris & Komodakis (2018) $^3$ .
295
+ - The Tiered-ImageNet dataset (Ren et al., 2018) is a larger subset of ImageNet-1k with 608 classes split as 351 training, 97 validation and 160 testing classes, each with about 1300 images of size $84 \times 84$ . This dataset ensures that training, validation and test classes do not have a semantic overlap and is a potentially harder few-shot learning dataset.
296
+ - We also consider two smaller CIFAR-100 (Krizhevsky & Hinton, 2009) derivatives, both with $32 \times 32$ sized images and 600 images per class. The first is the CIFAR-FS dataset (Bertinetto et al., 2018) which splits classes randomly into 64 training, 16 validation and 20 test. The second is the FC-100 dataset (Oreshkin et al., 2018) which splits CIFAR-100 into 60 training, 20 validation and 20 test classes with minimal semantic overlap.
297
+
298
+ Each dataset has a training, validation and test set. The set of classes for each of these sets are disjoint from each other. For meta-training, we ran two sets of experiments: the first, where we only use the training set as the meta-training dataset, denoted by (train); the second, where we use both the training and validation sets as the meta-training dataset, denoted by (train + val). We use the test set to construct few-shot episodes.
299
+
300
+ # A.2 PRE-TRAINING
301
+
302
+ We use a wide residual network (Zagoruyko & Komodakis, 2016; Qiao et al., 2018; Rusu et al., 2018) with a widening factor of 10 and a depth of 28 which we denote as WRN-28-10. The smaller networks: conv $(64)_{\times 4}$ (Vinyals et al., 2016; Snell et al., 2017), ResNet-12 (He et al., 2016a; Oreshkin et al., 2018; Lee et al., 2019) and WRN-16-4 (Zagoruyko & Komodakis, 2016), are used for analysis in Appendix C. All networks are trained using SGD with a batch-size of 256, Nesterov's momentum set to 0.9, no dropout, weight decay of $10^{-4}$ . We use batch-normalization (Ioffe & Szegedy, 2015). We use two-cycles of learning rate annealing (Smith, 2017), these are 40 and 80 epochs each for all datasets except ImageNet-21k, which uses cycles of 8 and 16 epochs each. The learning rate is set to $10^{-i}$ at the beginning of the $i^{\text{th}}$ cycle and decreased to $10^{-6}$ by the end of that cycle with a cosine schedule (Loshchilov & Hutter, 2016). We use data parallelism across 8 Nvidia V100 GPUs and half-precision training using techniques from Micikevicius et al. (2017); Howard et al. (2018).
303
+
304
+ We use the following regularization techniques that have been discovered in the non-few-shot, standard image classification literature (Xie et al., 2018) for pre-training the backbone.
305
+
306
+ - Mixup (Zhang et al., 2017): This augments data by a linear interpolation between input images and their one-hot labels. If $(x_{1},y_{1}),(x_{2},y_{2})\in \mathcal{D}$ are two samples, mixup creates a new sample $(\tilde{x},\tilde{y})$ where $\tilde{x} = \lambda x_1 + (1 - \lambda)x_2$ and its label $\tilde{y} = \lambda e_{y_1} + (1 - \lambda)e_{y_2}$ ; here $e_k$ is the one-hot vector with a non-zero $k^{\mathrm{th}}$ entry and $\lambda \in [0,1]$ is sampled from $\operatorname{Beta}(\alpha ,\alpha)$ for a hyper-parameter $\alpha$ .
307
+ - Label smoothing (Szegedy et al., 2016): When using a softmax operator, the logits can increase or decrease in an unbounded manner causing numerical instabilities while training. Label smoothing sets $p_{\theta}(k|x) = 1 - \epsilon$ if $k = y$ and $\epsilon/(K - 1)$ otherwise, for a small constant $\epsilon > 0$ and number of classes $K$ . The ratio between the largest and smallest output neuron is thus fixed which helps large-scale training.
308
+ - We exclude the batch-normalization parameters from weight-decay (Jia et al., 2018).
309
+
310
+ We set $\epsilon = 0.1$ for label smoothing cross-entropy loss and $\alpha = 0.25$ for mixup regularization for all our experiments.
311
+
312
+ # A.3 FINE-TUNING HYPER-PARAMETERS
313
+
314
+ We used 1-shot 5-way episodes on the validation set of Mini-ImageNet to manually tune hyperparameters. Fine-tuning is done for 25 epochs with a fixed learning rate of $5 \times 10^{-5}$ with Adam (Kingma & Ba, 2014). Adam is used here as it is more robust to large changes in the magnitude of the loss and gradients which occurs if the number of classes in the few-shot episode (ways) is large. We do not use any regularization (weight-decay, mixup, dropout, or label smoothing) in the fine-tuning phase. These hyper-parameters are kept constant on all benchmark datasets, namely Mini-ImageNet, Tiered-ImageNet, CIFAR-FS and FC-100.
315
+
316
+ All fine-tuning and evaluation is performed on a single GPU in full-precision. We update the parameters sequentially by computing the gradient of the two terms in (8) independently. This updates both the weights of the model and the batch-normalization parameters.
317
+
318
+ # A.4 DATA AUGMENTATION
319
+
320
+ Input images are normalized using the mean and standard-deviation computed on ImageNet-1k. Our data augmentation consists of left-right flips with probability of 0.5, padding the image with 4px and adding brightness and contrast changes of $\pm 40\%$ . The augmentation is kept the same for both pre-training and fine-tuning.
321
+
322
+ We explored augmentation using affine transforms of the images but found that adding this has minor effect with no particular trend on the numerical results.
323
+
324
+ # A.5 EVALUATION PROCEDURE
325
+
326
+ The few-shot episode contains classes that are uniformly sampled from the test classes of corresponding datasets. Support and query samples are further uniformly sampled for each class. The query shot is fixed to 15 for all experiments unless noted otherwise. We evaluate all networks over 1,000 episodes unless noted otherwise. For ease of comparison, we report the mean accuracy and the $95\%$ confidence interval of the estimate of the mean accuracy.
327
+
328
+ # B SETUP FOR IMAGENET-21K
329
+
330
+ The ImageNet-21k dataset (Deng et al., 2009) has 14.2M images across 21,814 classes. The blue region in Fig. 4 denotes our meta-training set with 7,491 classes, each with more than 1,000 images. The green region shows 13,007 classes with at least 10 images each, the set used to construct few-shot episodes. We do not use the red region consisting of 1,343 classes with less than 10 images each. We train the same backbone (WRN-28-10) with the same procedure as that in Appendix A on $84 \times 84$ resized images, albeit for only 24 epochs. Since we use the same hyper-parameters as the other benchmark datasets, we did not create validation sets for pre-training or the fine-tuning phases. The few-shot episodes are constructed in the same way as Appendix A. We evaluate using fewer few-shot episodes (80) on this dataset because we would like to demonstrate the performance across a large number of different ways.
331
+
332
+ # C ADDITIONAL ANALYSIS
333
+
334
+ This section contains additional experiments and analysis, complementing Section 4.3. All experiments use the (train + val) setting, pre-training on both the training and validation sets of the corresponding datasets, unless noted otherwise.
335
+
336
+ ![](images/879861d97c913b5f6c7d9244714a6d3b0ce2a0d518baa388612402a4cf2813c3.jpg)
337
+ Figure 4: ImageNet-21k is a highly imbalanced dataset. The most frequent class has about 3K images while the rarest class has a single image.
338
+
339
+ ![](images/4f3cd319a2e0efdc2cd28868ef91c968d25a1d9cac38d41f22613a75890b1517.jpg)
340
+ Figure 5: t-SNE (Maaten & Hinton, 2008) embedding of the logits for 1-shot 5-way few-shot episode of Mini-ImageNet. Colors denote the ground-truth labels; crosses denote the support samples; circles denote the query samples; translucent markers and opaque markers denote the embeddings before and after transductive fine-tuning respectively. Even though query samples are far away from their respective supports in the beginning, they move towards the supports by the end of transductive fine-tuning. Logits of support samples are relatively unchanged which suggests that the support-based initialization is effective.
341
+
342
+ # C.1 TRANSDUCTIVE FINE-TUNING CHANGES THE EMBEDDING DRAMATICALLY
343
+
344
+ Fig. 5 demonstrates this effect. The logits for query samples are far from those of their respective support samples and metric-based loss functions, e.g., those for prototypical networks (Snell et al., 2017) would have a high loss on this episode; indeed the accuracy after the support-based initialization is $64\%$ . Logits for the query samples change dramatically during transductive fine-tuning and a majority of the query samples cluster around their respective supports. The post transductive fine-tuning accuracy of this episode is $73.3\%$ . This suggests that modifying the embedding using the query samples is crucial to obtaining good performance on new classes. This example also demonstrates that the support-based initialization is effective: logits of the support samples are relatively unchanged during the transductive fine-tuning phase.
345
+
346
+ # C.2 LARGE VS. SMALL BACKBONES
347
+
348
+ The expressive power of the backbone plays an important role in the efficacy of fine-tuning. We observed that a WRN-16-4 architecture (2.7M parameters) performs worse than WRN-28-10 (36M parameters). The former obtains $63.28 \pm 0.68\%$ and $77.39 \pm 0.5\%$ accuracy on Mini-ImageNet and $69.04 \pm 0.69\%$ and $83.55 \pm 0.51\%$ accuracy on Tiered-ImageNet on 1-shot 5-way and 5-shot 5-way protocols respectively. While these numbers are comparable to those of state-of-the-art algorithms, they are lower than their counterparts for WRN-28-10 in Table 1. This suggests that a larger network is effective in learning richer features from the meta-training classes, and fine-tuning is effective in taking advantage of this to further improve performance on samples belonging to few-shot classes.
349
+
350
+ # C.3 LATENCY WITH A SMALLER BACKBONE
351
+
352
+ The WRN-16-4 architecture (2.7M parameters) is much smaller than WRN-28-10 (36M parameters) and transductive fine-tuning on the former is much faster. As compared to our implementation of Snell et al. (2017) with the same backbone, WRN-16-4 is $20 - 70 \times$ slower (0.87 vs. 0.04 seconds for a query shot of 1, and 2.85 vs. 0.04 seconds for a query shot of 15) for the 1-shot 5-way scenario. Compare this to the computational complexity experiment in Section 4.3.
353
+
354
+ As discussed in Appendix C.2, the accuracy of WRN-16-4 is $63.28 \pm 0.68\%$ and $77.39 \pm 0.5\%$ for 1-shot 5-way and 5-shot 5-way on Mini-ImageNet respectively. As compared to this, our implementation of (Snell et al., 2017) using a WRN-16-4 backbone obtains $57.29 \pm 0.40\%$ and $75.34 \pm 0.32\%$ accuracies for the same settings respectively; the former number in particular is significantly worse than its transductive fine-tuning counterpart.
355
+
356
+ # C.4 COMPARISONS AGAINST BACKBONES IN THE CURRENT LITERATURE
357
+
358
+ We include experiments using conv $(64)_{\times 4}$ and ResNet-12 in Table 3, in addition to WRN-28-10 in Section 4, in order to facilitate comparisons of the proposed baseline for different backbone architectures. Our results are comparable or better than existing results for a given backbone architecture, except for those in Liu et al. (2018b) who use a graph-based transduction algorithm, for conv $(64)_{\times 4}$ on Mini-ImageNet. In line with our goal for simplicity, we kept the hyper-parameters for pre-training and fine-tuning the same as the ones used for WRN-28-10 (cf. Sections 3 and 4). These results suggest that transductive fine-tuning is a sound baseline for a variety of backbone architectures.
359
+
360
+ # C.5 USING MORE META-TRAINING CLASSES
361
+
362
+ In Section 4.1 we observed that having more pre-training classes improves few-shot performance. But since we append a classifier on top of a pre-trained backbone and use the logits of the backbone as inputs to the classifier, a backbone pre-trained on more classes would also have more parameters as compared to one pre-trained on fewer classes. However, this difference is not large: WRN-28-10 for Mini-ImageNet has $0.03\%$ more parameters for (train + val) as compared to (train). However, in order to facilitate a fair comparison, we ran an experiment where we use the features of the backbone, instead of the logits, as inputs to the classifier. By doing so, the number of parameters in the pre-trained backbone that are used for few-shot classification remains the same for both the (train) and (train + val) settings. For Mini-ImageNet, (train + val) obtains $64.20 \pm 0.65\%$ and $81.26 \pm 0.45\%$ , and (train) obtains $62.55 \pm 0.65\%$ and $78.89 \pm 0.46\%$ , for 1-shot 5-way and 5-shot 5-way respectively. These results corroborate the original statement that more pre-training classes improves few-shot performance.
363
+
364
+ # C.6 USING FEATURES OF THE BACKBONE AS INPUT TO THE CLASSIFIER
365
+
366
+ Instead of re-initializing the final fully-connected layer of the backbone to classify new classes, we simply append the classifier on top of it. We implemented the former, more common, approach and found that it achieves an accuracy of $64.20 \pm 0.65\%$ and $81.26 \pm 0.45\%$ for 1-shot 5-way and 5-shot 5-way respectively on Mini-ImageNet, while the accuracy on Tiered-ImageNet is $67.14 \pm$
367
+
368
+ Table 3: Few-shot accuracies on benchmark datasets for 5-way few-shot episodes. The notation conv $(64^{k})_{\times 4}$ denotes a CNN with 4 layers and $64^{k}$ channels in the $k^{\mathrm{th}}$ layer. The rows are grouped by the backbone architectures. Best results in each column and for a given backbone architecture are shown in bold. Results where the support-based initialization is better than or comparable to existing algorithms are denoted by $\dagger$ . The notation (train + val) indicates that the backbone was pre-trained on both training and validation sets of the datasets; the backbone is trained only on the training set otherwise. (Lee et al., 2019) uses a $1.25\times$ wider ResNet-12 which we denote as ResNet-12 *.
369
+
370
+ <table><tr><td rowspan="2">Algorithm</td><td rowspan="2">Architecture</td><td colspan="2">Mini-ImageNet</td><td colspan="2">Tiered-ImageNet</td><td colspan="2">CIFAR-FS</td><td colspan="2">FC-100</td></tr><tr><td>1-shot (%)</td><td>5-shot (%)</td><td>1-shot (%)</td><td>5-shot (%)</td><td>1-shot (%)</td><td>5-shot (%)</td><td>1-shot (%)</td><td>5-shot (%)</td></tr><tr><td>MAML (Finn et al., 2017)</td><td>conv (32)×4</td><td>48.70 ± 1.84</td><td>63.11 ± 0.92</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Matching networks (Vinyals et al., 2016)</td><td>conv (64)×4</td><td>46.6</td><td>60</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LSTM meta-learner (Ravi &amp; Larochelle, 2016)</td><td>conv (64)×4</td><td>43.44 ± 0.77</td><td>60.60 ± 0.71</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Prototypical Networks (Snell et al., 2017)</td><td>conv (64)×4</td><td>49.42 ± 0.78</td><td>68.20 ± 0.66</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Transductive Propagation (Liu et al., 2018b)</td><td>conv (64)×4</td><td>55.51 ± 0.86</td><td>69.86 ± 0.65</td><td>59.91 ± 0.94</td><td>73.30 ± 0.75</td><td></td><td></td><td></td><td></td></tr><tr><td>Support-based initialization (train)</td><td>conv (64)×4</td><td>50.69 ± 0.63</td><td>66.07 ± 0.53</td><td>58.42 ± 0.69</td><td>73.98 ± 0.58†</td><td>61.77 ± 0.73</td><td>76.40 ± 0.54</td><td>36.07 ± 0.54</td><td>48.72 ± 0.57</td></tr><tr><td>Fine-tuning (train)</td><td>conv (64)×4</td><td>49.43 ± 0.62</td><td>66.42 ± 0.53</td><td>57.45 ± 0.68</td><td>73.96 ± 0.56</td><td>59.74 ± 0.72</td><td>76.37 ± 0.53</td><td>35.46 ± 0.53</td><td>49.43 ± 0.57</td></tr><tr><td>Transductive fine-tuning (train)</td><td>conv (64)×4</td><td>50.46 ± 0.62</td><td>66.68 ± 0.52</td><td>58.05 ± 0.68</td><td>74.24 ± 0.56</td><td>61.73 ± 0.72</td><td>76.92 ± 0.52</td><td>36.62 ± 0.55</td><td>50.24 ± 0.58</td></tr><tr><td>R2D2 (Bertinetto et al., 2018)</td><td>conv 
(96k)×4</td><td>51.8 ± 0.2</td><td>68.4 ± 0.2</td><td></td><td></td><td>65.4 ± 0.2</td><td>79.4 ± 0.2</td><td></td><td></td></tr><tr><td>TADAM (Oreshkin et al., 2018)</td><td>ResNet-12</td><td>58.5 ± 0.3</td><td>76.7 ± 0.3</td><td></td><td></td><td></td><td></td><td>40.1 ± 0.4</td><td>56.1 ± 0.4</td></tr><tr><td>Transductive Propagation (Liu et al., 2018b)</td><td>ResNet-12</td><td>59.46</td><td>75.64</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>Support-based initialization (train)</td><td>ResNet-12</td><td>54.21 ± 0.64</td><td>70.58 ± 0.54</td><td>66.39 ± 0.73</td><td>81.93 ± 0.54</td><td>65.69 ± 0.72</td><td>79.95 ± 0.51</td><td>35.51 ± 0.53</td><td>48.26 ± 0.54</td></tr><tr><td>Fine-tuning (train)</td><td>ResNet-12</td><td>56.67 ± 0.62</td><td>74.80 ± 0.51</td><td>64.45 ± 0.70</td><td>83.59 ± 0.51</td><td>64.66 ± 0.73</td><td>82.13 ± 0.50</td><td>37.52 ± 0.53</td><td>55.39 ± 0.57</td></tr><tr><td>Transductive fine-tuning (train)</td><td>ResNet-12</td><td>62.35 ± 0.66</td><td>74.53 ± 0.54</td><td>68.41 ± 0.73</td><td>83.41 ± 0.52</td><td>70.76 ± 0.74</td><td>81.56 ± 0.53</td><td>41.89 ± 0.59</td><td>54.96 ± 0.55</td></tr><tr><td>MetaOpt SVM (Lee et al., 2019)</td><td>ResNet-12*</td><td>62.64 ± 0.61</td><td>78.63 ± 0.46</td><td>65.99 ± 0.72</td><td>81.56 ± 0.53</td><td>72.0 ± 0.7</td><td>84.2 ± 0.5</td><td>41.1 ± 0.6</td><td>55.5 ± 0.6</td></tr><tr><td>Support-based initialization (train)</td><td>WRN-28-10</td><td>56.17 ± 0.64</td><td>73.31 ± 0.53</td><td>67.45 ± 0.70</td><td>82.88 ± 0.53</td><td>70.26 ± 0.70</td><td>83.82 ± 0.49</td><td>36.82 ± 0.51</td><td>49.72 ± 0.55</td></tr><tr><td>Fine-tuning (train)</td><td>WRN-28-10</td><td>57.73 ± 0.62</td><td>78.17 ± 0.49</td><td>66.58 ± 0.70</td><td>85.55 ± 0.48</td><td>68.72 ± 0.67</td><td>86.11 ± 0.47</td><td>38.25 ± 0.52</td><td>57.19 ± 0.57</td></tr><tr><td>Transductive fine-tuning (train)</td><td>WRN-28-10</td><td>65.73 ± 0.68</td><td>78.40 ± 0.52</td><td>73.34 ± 0.71</td><td>85.50 ± 
0.50</td><td>76.58 ± 0.68</td><td>85.79 ± 0.50</td><td>43.16 ± 0.59</td><td>57.57 ± 0.55</td></tr><tr><td>Support-based initialization (train + val)</td><td>conv (64)×4</td><td>52.77 ± 0.64</td><td>68.29 ± 0.54</td><td>59.08 ± 0.70</td><td>74.62 ± 0.57</td><td>64.01 ± 0.71</td><td>78.46 ± 0.53</td><td>40.25 ± 0.56</td><td>54.53 ± 0.57</td></tr><tr><td>Fine-tuning (train + val)</td><td>conv (64)×4</td><td>51.40 ± 0.61</td><td>68.58 ± 0.52</td><td>58.04 ± 0.68</td><td>74.48 ± 0.56</td><td>62.12 ± 0.71</td><td>77.98 ± 0.52</td><td>39.09 ± 0.55</td><td>54.83 ± 0.55</td></tr><tr><td>Transductive fine-tuning (train + val)</td><td>conv (64)×4</td><td>52.30 ± 0.61</td><td>68.78 ± 0.53</td><td>58.81 ± 0.69</td><td>74.71 ± 0.56</td><td>63.89 ± 0.71</td><td>78.48 ± 0.52</td><td>40.33 ± 0.56</td><td>55.60 ± 0.56</td></tr><tr><td>Support-based initialization (train + val)</td><td>ResNet-12</td><td>56.79 ± 0.65</td><td>72.94 ± 0.55</td><td>67.60 ± 0.71</td><td>83.09 ± 0.53</td><td>69.39 ± 0.71</td><td>83.27 ± 0.50</td><td>43.11 ± 0.58</td><td>58.16 ± 0.57</td></tr><tr><td>Fine-tuning (train + val)</td><td>ResNet-12</td><td>58.64 ± 0.64</td><td>76.83 ± 0.50</td><td>65.55 ± 0.70</td><td>84.51 ± 0.50</td><td>68.11 ± 0.70</td><td>85.19 ± 0.48</td><td>42.84 ± 0.57</td><td>63.10 ± 0.57</td></tr><tr><td>Transductive fine-tuning (train + val)</td><td>ResNet-12</td><td>64.50 ± 0.68</td><td>76.92 ± 0.55</td><td>69.48 ± 0.73</td><td>84.37 ± 0.51</td><td>74.35 ± 0.71</td><td>84.57 ± 0.53</td><td>48.29 ± 0.63</td><td>63.38 ± 0.58</td></tr><tr><td>MetaOpt SVM (Lee et al., 2019) (train + val)</td><td>ResNet-12*</td><td>64.09 ± 0.62</td><td>80.00 ± 0.45</td><td>65.81 ± 0.74</td><td>81.75 ± 0.53</td><td>72.8 ± 0.7</td><td>85.0 ± 0.5</td><td>47.2 ± 0.6</td><td>62.5 ± 0.6</td></tr><tr><td>Activation to Parameter (Qiao et al., 2018) (train + val)</td><td>WRN-28-10</td><td>59.60 ± 0.41</td><td>73.74 ± 0.19</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>LEO (Rusu et al., 2018) 
(train + val)</td><td>WRN-28-10</td><td>61.76 ± 0.08</td><td>77.59 ± 0.12</td><td>66.33 ± 0.05</td><td>81.44 ± 0.09</td><td></td><td></td><td></td><td></td></tr><tr><td>Support-based initialization (train + val)</td><td>WRN-28-10</td><td>58.47 ± 0.66</td><td>75.56 ± 0.52</td><td>67.34 ± 0.69†</td><td>83.32 ± 0.51†</td><td>72.14 ± 0.69</td><td>85.21 ± 0.49</td><td>45.08 ± 0.61</td><td>60.05 ± 0.60</td></tr><tr><td>Fine-tuning (train + val)</td><td>WRN-28-10</td><td>59.62 ± 0.66</td><td>79.93 ± 0.47</td><td>66.23 ± 0.68</td><td>86.08 ± 0.47</td><td>70.07 ± 0.67</td><td>87.26 ± 0.45</td><td>43.80 ± 0.58</td><td>64.40 ± 0.58</td></tr><tr><td>Transductive fine-tuning (train + val)</td><td>WRN-28-10</td><td>68.11 ± 0.69</td><td>80.36 ± 0.50</td><td>72.87 ± 0.71</td><td>86.15 ± 0.50</td><td>78.36 ± 0.70</td><td>87.54 ± 0.49</td><td>50.44 ± 0.68</td><td>65.74 ± 0.60</td></tr></table>
371
+
372
+ $0.74\%$ and $86.67 \pm 0.46\%$ for 1-shot 5-way and 5-shot 5-way respectively. These numbers are significantly lower for the 1-shot 5-way protocol on both datasets compared to their counterparts in Table 1. However, the 5-shot 5-way accuracy is marginally higher in this experiment than that in Table 1. As noted in Remark 2, logits of the backbone are well-clustered and that is why they work better for few-shot scenarios.
373
+
374
+ # C.7 FREEZING THE BACKBONE RESTRICTS PERFORMANCE
375
+
376
+ The previous observation suggests that the network changes a lot in the fine-tuning phase. Freezing the backbone severely restricts the changes in the network to only changes to the classifier. As a consequence, the accuracy of freezing the backbone is $58.38 \pm 0.66\%$ and $75.46 \pm 0.52\%$ on Mini-ImageNet and $67.06 \pm 0.69\%$ and $83.20 \pm 0.51\%$ on Tiered-ImageNet for 1-shot 5-way and 5-shot 5-way respectively. While the 1-shot 5-way accuracies are much lower than their counterparts in Table 1, the gap in the 5-shot 5-way scenario is smaller.
377
+
378
+ # C.8 USING MIXUP DURING PRE-TRAINING
379
+
380
+ Mixup improves the few-shot accuracy by about $1\%$ ; the accuracy for WRN-28-10 trained without mixup is $67.06 \pm 0.71\%$ and $79.29 \pm 0.51\%$ on Mini-ImageNet for 1-shot 5-way and 5-shot 5-way respectively.
381
+
382
+ # C.9 MORE FEW-SHOT EPISODES
383
+
384
+ Fig. 1 suggests that the standard deviation of the accuracies achieved by few-shot algorithms is high. Considering this randomness, evaluations were done over 10,000 few-shot episodes as well. The accuracies on Mini-ImageNet are $67.77 \pm 0.21\%$ and $80.24 \pm 0.16\%$ and on Tiered-ImageNet are $72.36 \pm 0.23\%$ and $85.70 \pm 0.16\%$ for 1-shot 5-way and 5-shot 5-way respectively. The numbers are consistent with the ones for 1,000 few-shot episodes in Table 1, though the confidence intervals decreased as the number of episodes sampled increased.
385
+
386
+ # C.10 EVALUATION ON META-DATASET
387
+
388
+ Table 4: Few-shot accuracies on Meta-Dataset: Best results in each row are shown in bold. 600 few-shot episodes were used to compare to the results reported in Triantafillou et al. (2019).
389
+
390
+ <table><tr><td>Dataset</td><td>Best performance in Triantafillou et al. (2019)</td><td>Transductive Fine-tuning</td><td>Rank for Transductive Fine-tuning (based on Triantafillou et al. (2019))</td></tr><tr><td>ImageNet-1k (ILSVRC)</td><td>51.01 ± 1.05</td><td>55.57 ± 1.02</td><td>1</td></tr><tr><td>Omniglot</td><td>63.00 ± 1.35</td><td>79.59 ± 0.98</td><td>1</td></tr><tr><td>Aircraft</td><td>68.69 ± 1.26</td><td>67.26 ± 0.98</td><td>1.5</td></tr><tr><td>Birds</td><td>68.79 ± 1.01</td><td>74.26 ± 0.82</td><td>1</td></tr><tr><td>Textures</td><td>69.05 ± 0.90</td><td>77.35 ± 0.74</td><td>1</td></tr><tr><td>VGG Flowers</td><td>86.86 ± 0.75</td><td>88.14 ± 0.63</td><td>1.5</td></tr><tr><td>Traffic Signs</td><td>66.79 ± 1.31</td><td>55.98 ± 1.32</td><td>2</td></tr><tr><td>MSCOCO</td><td>43.41 ± 1.06</td><td>40.62 ± 0.98</td><td>2.5</td></tr><tr><td>Average Rank</td><td></td><td></td><td>1.4375</td></tr></table>
391
+
392
+ We ran experiments on Meta-Dataset (Triantafillou et al., 2019), and compared the performance of transductive fine-tuning for meta-training done on ImageNet-1k (ILSVRC) in Table 4. Transductive fine-tuning is better, most times significantly, than state-of-the-art on 6 out of 8 tasks in Meta-Dataset; its average rank across all tasks is 1.4375 (calculated using the results reported in Triantafillou et al. (2019)). The Fungi and Quick Draw datasets were not included because of issues with getting the data; the link to access the dataset for the former does not seem to work and the latter requires certain legal conditions which we are working on obtaining.
393
+
394
+ The few-shot episode sampling was done the same way as described in Triantafillou et al. (2019); except for the few-shot class sampling for ImageNet-1k (ILSVRC) and Omniglot, which was done uniformly over all few-shot classes (Triantafillou et al. (2019) use a hierarchical sampling technique to sample classes that are far from each other in the hierarchy, and hence easier to distinguish between). The hyper-parameters used for meta-training and few-shot fine-tuning are kept the same as the ones in Section 4 and are not tuned for these experiments.
395
+
396
+ # D FREQUENTLY ASKED QUESTIONS
397
+
398
+ # 1. Why has it not been noticed yet that this simple approach works so well?
399
+
400
+ Non-transductive fine-tuning as a baseline has been considered before (Vinyals et al., 2016; Chen et al., 2018). The fact that this is comparable to state-of-the-art has probably gone unnoticed because of the following reasons:
401
+
402
+ - Given that there are only a few labeled support samples provided in the few-shot setting, initializing the classifier becomes important. The support-based initialization (cf. Section 3.1) motivated from the deep metric learning literature (Hu et al., 2015; Movshovitz-Attias et al., 2017; Qi et al., 2018; Gidaris & Komodakis, 2018) classifies support samples correctly (for a support shot of 1, this may not be true for higher support shots). This initialization, as opposed to initializing the weights of the classifier randomly, was critical to performance in our experiments.
403
+ - In our experience, existing meta-training methods, both gradient-based ones and metric-based ones, are difficult to tune for larger architectures. We speculate that this is the reason a large part of the existing literature focuses on smaller backbone architectures. The few-shot learning literature has only recently started to move towards bigger backbone architectures (Oreshkin et al., 2018; Rusu et al., 2018). From Table 3 we see that non-transductive fine-tuning gets better with a deeper backbone architecture. A similar observation was made by Chen et al. (2018). The observation that we can use "simple" well-understood training techniques from standard supervised learning that scale up to large backbone architectures for few-shot classification is a key contribution of our paper.
404
+
405
+ Transductive methods have recently started to become popular in the few-shot learning literature (Nichol et al., 2018; Liu et al., 2018a). Because of the scarcity of labeled support samples, it is crucial to make use of the unlabeled query samples in the few-shot regime.
406
+
407
+ Our advocated baseline makes use of both a good initialization and transduction, both relatively new in the few-shot learning literature, which is likely why this simple approach has gone unrecognized until now.
408
+
409
+ # 2. Transductive fine-tuning works better than existing algorithms because of a big backbone architecture. One should compare on the same backbone architectures as the existing algorithms for a fair comparison.
410
+
411
+ The current literature is in the spirit of increasingly sophisticated approaches for modest performance gains, often with different architectures (cf. Table 1). This is why we set out to establish a baseline. Our simple baseline is comparable or better than existing approaches. The backbone we have used is common in the recent few-shot learning literature (Rusu et al., 2018; Qiao et al., 2018) (cf. Table 1). Additionally, we have included results on smaller common backbone architectures, namely conv $(64)_{\times 4}$ and ResNet-12 in Appendix C.4, and some additional experiments in Appendix C.2. These experiments suggest that transductive fine-tuning is a sound baseline for a variety of different backbone architectures. This indicates that we should take results on existing benchmarks with a grain of salt. Also see the response to question 1 above.
412
+
413
+ # 3. There are missing entries in Tables 1 and 3. Is it still a fair comparison?
414
+
415
+ Tables 1 and 3 show all relevant published results by the original authors. Re-implementing existing algorithms to fill missing entries without access to original code is impractical and often yields results inferior to those published, which may be judged as unfair. The purpose of a benchmark is to enable others to test their method easily. This does not exist today due to myriad performance-critical design choices often not detailed in the papers. In fact, missing entries in the table indicate the inadequate state of the current literature. Our work enables benchmarking relative to a simple, systematic baseline.
416
+
417
+ # 4. Fine-tuning for few-shot learning is not novel.
418
+
419
+ We do not claim novelty in this paper. Transductive fine-tuning is our advocated baseline for few-shot classification. It is a combination of different techniques that are not novel. Yet, it performs better than existing algorithms on all few-shot protocols with fixed hyper-parameters. We emphasize that this indicates the need to re-interpret existing results on benchmarks and re-evaluate the status quo in the literature.
420
+
421
+ # 5. Transductive fine-tuning has a very high latency at inference time, this is not practical.
422
+
423
+ Our goal is to establish a systematic baseline for accuracy, which might help judge the accuracy of few-shot learning algorithms in the future. The question of test-time latency is indeed important but we have not focused on it in this paper. Appendix C.3 provides results using a smaller backbone where we see that the WRN-16-4 network is about 20-70x slower than metric-based approaches employing the same backbone while having significantly better accuracy. The latencies with WRN-28-10 are larger (see the computational complexity section in Section 4.3) but with a bigger advantage in terms of accuracy.
424
+
425
+ There are other transductive methods used for few-shot classification (Nichol et al., 2018; Liu et al., 2018a), that are expected to be slow as well.
426
+
427
+ # 6. Transductive fine-tuning does not make sense in the online setting when query samples are shown in a sequence.
428
+
429
+ Transductive fine-tuning can be performed even with a single test datum. Indeed, the network can specialize itself completely to classify this one datum. We explore a similar scenario in Section 4.3 and Fig. 2a, which discuss the performance of transductive fine-tuning with a query shot of 1 (this means 5 query samples one from each class for 5-way evaluation). Note that the loss function in (8) leverages multiple query samples when available. It does not require that the query samples be balanced in terms of their ground-truth classes. In particular, the loss function in (8) is well-defined even for a single test datum. For concerns about latency, see the question 5 above.
430
+
431
+ # 7. Having transductive approaches will incentivize hacking the query set.
432
+
433
+ There are already published methods that use transductive methods (Nichol et al., 2018; Liu et al., 2018a), and it is a fundamental property of the transductive paradigm to be dependent on the query set, in addition to the support set. In order to prevent query set hacking, we will make the test episodes public which will enable consistent benchmarking, even for transductive methods.
434
+
435
+ # 8. Why is having the same hyper-parameters for different few-shot protocols so important?
436
+
437
+ A practical few-shot learning algorithm should be able to handle any few-shot protocol. Having one model for each different scenario is unreasonable in the real-world, as the number of different scenarios is, in principle, infinite. Current algorithms do not handle this well. A single model which can handle any few-shot scenario is thus desirable.
438
+
439
+ # 9. Is this over-fitting to the test datum?
440
+
441
+ No, the label of the test datum is not used in the loss function.
442
+
443
+ # 10. Can you give some intuition about the hardness metric? How did you come up with the formula?
444
+
445
+ The hardness metric is the clustering loss where the labeled support samples form the centers of the class-specific clusters. The special form, namely, $E_{(x,y) \in \mathcal{D}_{\mathbf{q}}} \log \frac{1 - p(y|x)}{p(y|x)}$ (cf. (9)) allows an interpretation of log-odds. We used this form because it is sensitive to the number of few-shot classes (cf. Fig. 3). Similar metrics, e.g., $E_{(x,y) \in \mathcal{D}_{\mathbf{q}}}[-\log p(y|x)]$ can also be used but they come with a few caveats. Note that it is easier for $p(y|x)$ to be large for small way because the normalization constant in softmax has fewer terms. For large way, $p(y|x)$ could be smaller. This effect is better captured by our metric.
446
+
447
+ # 11. How does Fig. 3 look for algorithm X, Y, Z?
448
+
449
+ We compared two algorithms in Fig. 3, namely transductive fine-tuning and support-based initialization. Section 4.4 and the caption of Fig. 3 explains how the former algorithm is better. We will consider adding comparisons to other algorithms to this plot in the future.
abaselineforfewshotimageclassification/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1fd17df0ca0242abe0766cfdc494c71c164a06cf5571c77ed451122226b0d62
3
+ size 619684
abaselineforfewshotimageclassification/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a149c7b35d04862e403a6a88db7ce47bf967bdc02abd6734fcdd6b461bdd88c8
3
+ size 628713
abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cf9824189abe7d90962e763c2ed2f4226cc85afa0457a39d5352e3b6559dc1d
3
+ size 100583
abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d8a8598d3df4c88584b8010a20ed71eae2a9cc41ea85d1c64194c19473dd340
3
+ size 125766
abductivecommonsensereasoning/e26663a0-12fd-4b06-a09f-42f58683ef0d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b57913e1d11314d1a6e91a69c6b03d93f03ed37a36c9eced2d35c28f8f3a3db
3
+ size 4867093
abductivecommonsensereasoning/full.md ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ABDUCTIVE COMMONSENSE REASONING
2
+
3
+ Chandra Bhagavatula $\diamond$ , Ronan Le Bras $\diamond$ , Chaitanya Malaviya $\diamond$ , Keisuke Sakaguchi $\diamond$ , Ari Holtzman $\diamond$ , Hannah Rashkin $\diamond$ , Doug Downey $\diamond$ , Scott Wen-tau Yih $\clubsuit$ , Yejin Choi $\diamond$ $\diamond$ Allen Institute for AI, Seattle, WA, USA, $\clubsuit$ Facebook AI, Seattle, WA, USA
4
+ $\diamond$ Paul G. Allen School of Computer Science & Engineering, WA, USA
5
+ {chandrab, ronanlb, chaitanyam, keisukes}@allenai.org
6
+ {arih, hannahr, dougd}@allenai.org
7
+ {yejin}@cs.washington.edu
8
+ {scottyih}@fb.com*
9
+
10
+ # ABSTRACT
11
+
12
+ Abductive reasoning is inference to the most plausible explanation. For example, if Jenny finds her house in a mess when she returns from work, and remembers that she left a window open, she can hypothesize that a thief broke into her house and caused the mess, as the most plausible explanation. While abduction has long been considered to be at the core of how people interpret and read between the lines in natural language (Hobbs et al., 1988), there has been relatively little research in support of abductive natural language inference and generation.
13
+
14
+ We present the first study that investigates the viability of language-based abductive reasoning. We introduce a challenge dataset, $\mathcal{ART}$ , that consists of over 20k commonsense narrative contexts and 200k explanations. Based on this dataset, we conceptualize two new tasks - (i) Abductive NLI: a multiple-choice question answering task for choosing the more likely explanation, and (ii) Abductive NLG: a conditional generation task for explaining given observations in natural language. On Abductive NLI, the best model achieves $68.9\%$ accuracy, well below human performance of $91.4\%$ . On Abductive NLG, the current best language generators struggle even more, as they lack reasoning capabilities that are trivial for humans. Our analysis leads to new insights into the types of reasoning that deep pre-trained language models fail to perform--despite their strong performance on the related but more narrowly defined task of entailment NLI--pointing to interesting avenues for future research.
15
+
16
+ # 1 INTRODUCTION
17
+
18
+ The brain is an abduction machine, continuously trying to prove abductively that the observables in its environment constitute a coherent situation.
19
+
20
+ - Jerry Hobbs, ACL 2013 Lifetime Achievement Award
21
+
22
+ Abductive reasoning is inference to the most plausible explanation for incomplete observations (Peirce, 1965a). Figure 1 illustrates an example. Given the incomplete observations about the world that $O_{1}$ : "Jenny cleaned her house and went to work, leaving the window just a crack open." and sometime later $O_{2}$ : "When Jenny returned home, she saw her house was a mess," we can hypothesize different potential explanations and reason about which is the most likely. We can readily rule out $H_{3}$ since it fails to justify the observation $O_{2}$ . While $H_{1}$ and $H_{2}$ are both plausible, the most likely explanation based on commonsense is $H_{1}$ as $H_{2}$ is somewhat implausible given $O_{1}$ .
23
+
24
+ One crucial observation Peirce makes about abductive reasoning is that abduction is "the only logical operation which introduces any new ideas", which contrasts with other types of inference such as entailment, that focuses on inferring only such information that is already provided in the premise.
25
+
26
+ ![](images/1509576ac90281b02686a84277b05cbe7f1d4e7071dd1682b858edf217dd44e8.jpg)
27
+ Figure 1: Example of Abductive Reasoning. Given observations $O_{1}$ and $O_{2}$ , the $\alpha$ NLI task is to select the most plausible explanatory hypothesis. Since the number of hypotheses is massive in any given situation, we make a simplifying assumption in our ART dataset to only choose between a pair of explanations.
28
+
29
+ Abductive reasoning has long been considered to be at the core of understanding narratives (Hobbs et al., 1988), reading between the lines (Norvig, 1987; Charniak & Shimony, 1990), reasoning about everyday situations (Peirce, 1965b; Andersen, 1973), and counterfactual reasoning (Pearl, 2002; Pearl & Mackenzie, 2018). Despite the broad recognition of its importance, however, the study of abductive reasoning in narrative text has very rarely appeared in the NLP literature, in large part because most previous work on abductive reasoning has focused on formal logic, which has proven to be too rigid to generalize to the full complexity of natural language.
30
+
31
+ In this paper, we present the first study to investigate the viability of language-based abductive reasoning. This shift from logic-based to language-based reasoning draws inspirations from a significant body of work on language-based entailment (Bowman et al., 2015; Williams et al., 2018b), language-based logic (Lakoff, 1970; MacCartney & Manning, 2007), and language-based commonsense reasoning (Mostafazadeh et al., 2016; Zellers et al., 2018). In particular, we investigate the use of natural language as the representation medium, and probe deep neural models on language-based abductive reasoning.
32
+
33
+ More concretely, we propose Abductive Natural Language Inference ( $\alpha$ NLI) and Abductive Natural Language Generation ( $\alpha$ NLG) as two novel reasoning tasks in narrative contexts. We formulate $\alpha$ NLI as a multiple-choice task to support easy and reliable automatic evaluation: given a context, the task is to choose the more likely explanation from a given pair of hypothesis choices. We also introduce a new challenge dataset, $\mathcal{ART}$ , that consists of 20K narratives accompanied by over 200K explanatory hypotheses. We then establish comprehensive baseline performance based on state-of-the-art NLI and language models. The best baseline for $\alpha$ NLI based on BERT achieves $68.9\%$ accuracy, with a considerable gap compared to human performance of $91.4\%$ (§5.2). The best generative model, based on GPT2, performs well below human performance on the $\alpha$ NLG task (§5.2). Our analysis leads to insights into the types of reasoning that deep pre-trained language models fail to perform — despite their strong performance on the closely related but different task of entailment NLI — pointing to future research directions.
34
+
35
+ # 2 TASK DEFINITION
36
+
37
+ Abductive Natural Language Inference We formulate $\alpha$ NLI as multiple choice problems consisting of a pair of observations as context and a pair of hypothesis choices. Each instance in $\mathcal{ART}$ is defined as follows:
38
+
39
+ - $O_{1}$ : The observation at time $t_{1}$ .
40
+
41
+ - $O_{2}$ : The observation at time $t_{2} > t_{1}$ .
42
+ - $h^+$ : A plausible hypothesis that explains the two observations $O_1$ and $O_2$ .
43
+ - $h^-$ : An implausible (or less plausible) hypothesis for observations $O_1$ and $O_2$ .
44
+
45
+ Given the observations and a pair of hypotheses, the $\alpha$ NLI task is to select the most plausible explanation (hypothesis).
46
+
47
+ Abductive Natural Language Generation $\alpha$ NLG is the task of generating a valid hypothesis $h^+$ given the two observations $O_1$ and $O_2$ . Formally, the task requires to maximize $P(h^{+}|O_{1},O_{2})$ .
48
+
49
+ # 3 MODELS FOR ABDUCTIVE COMMONSENSE REASONING
50
+
51
+ # 3.1 ABDUCTIVE NATURAL LANGUAGE INFERENCE
52
+
53
+ A Probabilistic Framework for $\alpha$ NLI: A distinct feature of the $\alpha$ NLI task is that it requires jointly considering all available observations and their commonsense implications, to identify the correct hypothesis. Formally, the $\alpha$ NLI task is to select the hypothesis $h^*$ that is most probable given the observations.
54
+
55
+ $$
56
+ h ^ {*} = \arg \max _ {h ^ {i}} P \left(H = h ^ {i} \mid O _ {1}, O _ {2}\right) \tag {1}
57
+ $$
58
+
59
+ Rewriting the objective using Bayes Rule conditioned on $O_{1}$ , we have:
60
+
61
+ $$
62
+ P \left(h ^ {i} \mid O _ {1}, O _ {2}\right) \propto P \left(O _ {2} \mid h ^ {i}, O _ {1}\right) P \left(h ^ {i} \mid O _ {1}\right) \tag {2}
63
+ $$
64
+
65
+ We formulate a set of probabilistic models for $\alpha$ NLI that make various independence assumptions on Equation 2 - starting from a simple baseline that ignores the observations entirely, and building up to a fully joint model. These models are depicted as Bayesian Networks in Figure 2.
66
+
67
+ ![](images/8f69461bbf916d6a67c9726845a18030f8bba8c4656ee2685941bdee1e9e3ee6.jpg)
68
+ Figure 2: Illustration of the graphical models described in the probabilistic framework. The "Fully Connected" model can, in theory, combine information from both available observations.
69
+
70
+ Hypothesis Only: Our simplest model makes the strong assumption that the hypothesis is entirely independent of both observations, i.e. $(H \perp O_1, O_2)$ , in which case we simply aim to maximize the marginal $P(H)$ .
71
+
72
+ First (or Second) Observation Only: Our next two models make weaker assumptions: that the hypothesis depends on only one of the first $O_{1}$ or second $O_{2}$ observation.
73
+
74
+ Linear Chain: Our next model uses both observations, but considers each observation's influence on the hypothesis independently, i.e. it does not combine information across the observations. Formally, the model assumes that the three variables $\langle O_1, H, O_2 \rangle$ form a linear Markov chain, where the second observation is conditionally independent of the first, given the hypothesis (i.e. $(O_1 \perp O_2 | H)$ ). Under this assumption, we aim to maximize a somewhat simpler objective than Equation 2:
75
+
76
+ $$
77
+ h ^ {*} = \arg \max _ {h ^ {i}} P \left(O _ {2} \mid h ^ {i}\right) P \left(h ^ {i} \mid O _ {1}\right) \text {w h e r e} \left(O _ {1} \perp O _ {2} \mid H\right) \tag {3}
78
+ $$
79
+
80
+ Fully Connected: Finally, our most sophisticated model jointly models all three random variables as in Equation 2, and can in principle combine information across both observations to choose the correct hypothesis.
81
+
82
+ ![](images/876267a2885b7035ad7cc5663a093caa51bae33e9dea8401cf3d82475761f03e.jpg)
83
+ Figure 3: Overview of an $\alpha$ NLG model that integrates commonsense representations obtained from COMeT (Bosselut et al., 2019) with GPT2. Each observation is input to the COMeT model to obtain nine embeddings, each associated with one commonsense inference type.
84
+
85
+ To help illustrate the subtle distinction between how the Linear Chain and Fully Connected models consider both observations, consider the following example. Let observation $O_{1}$ : "Carl went to the store desperately searching for flour tortillas for a recipe." and $O_{2}$ : "Carl left the store very frustrated." Then consider two distinct hypotheses, an incorrect $h^{1}$ : "The cashier was rude" and the correct $h^{2}$ : "The store had corn tortillas, but not flour ones." For this example, a Linear Chain model could arrive at the wrong answer, because it reasons about the observations separately—taking $O_{1}$ in isolation, both $h^{1}$ and $h^{2}$ seem plausible next events, albeit each a priori unlikely. And for $O_{2}$ in isolation—i.e. in the absence of $O_{1}$ , as for a randomly drawn shopper—the $h^{1}$ explanation of a rude cashier seems a much more plausible explanation of Carl's frustration than are the details of the store's tortilla selection. Combining these two separate factors leads the Linear Chain to select $h^{1}$ as the more plausible explanation. It is only by reasoning about Carl's goal in $O_{1}$ jointly with his frustration in $O_{2}$ , as in the Fully Connected model, that we arrive at the correct answer $h^{2}$ as the more plausible explanation.
86
+
87
+ In our experiments, we encode the different independence assumptions in the best performing neural network model. For the hypothesis-only and single observation models, we can enforce the independencies by simply restricting the inputs of the model to only the relevant variables. On the other hand, the Linear Chain model takes all three variables as input, but we restrict the form of the model to enforce the conditional independence. Specifically, we learn a discriminative classifier:
88
+
89
+ $$
90
+ P _ {\mathrm {L i n e a r C h a i n}} (h | O _ {1}, O _ {2}) \propto e ^ {\phi (O _ {1}, h) + \phi^ {\prime} (h, O _ {2})}
91
+ $$
92
+
93
+ where $\phi$ and $\phi^{\prime}$ are neural networks that produce scalar values.
94
+
95
+ # 3.2 ABDUCTIVE NATURAL LANGUAGE GENERATION
96
+
97
+ Given $h^+ = \{w_1^h \ldots w_l^h\}$ , $O_1 = \{w_1^{o1} \ldots w_m^{o1}\}$ and $O_2 = \{w_1^{o2} \ldots w_n^{o2}\}$ as sequences of tokens, the $\alpha$ NLG task can be modeled as $P(h^+ | O_1, O_2) = \prod P(w_i^h | w_{<i}^h, w_1^{o1} \ldots w_m^{o1}, w_1^{o2} \ldots w_n^{o2})$ . Optionally, the model can also be conditioned on background knowledge $\mathcal{K}$ . Parameterized models can then be trained to minimize the negative log-likelihood over instances in $\mathcal{ART}$ :
98
+
99
+ $$
100
+ \mathcal {L} = - \sum_ {i = 1} ^ {N} \log P \left(w _ {i} ^ {h} \mid w _ {< i} ^ {h}, w _ {1} ^ {o 1} \dots w _ {m} ^ {o 1}, w _ {1} ^ {o 2} \dots w _ {n} ^ {o 2}, \mathcal {K}\right) \tag {4}
101
+ $$
102
+
103
+ # 4 ART DATASET: ABDUCTIVE REASONING IN NARRATIVE TEXT
104
+
105
+ $\mathcal{ART}$ is the first large-scale benchmark dataset for studying abductive reasoning in narrative texts. It consists of $\sim 20\mathrm{K}$ narrative contexts (pairs of observations $\langle O_1,O_2\rangle$ ) with over $200\mathrm{K}$ explanatory hypotheses. Table 6 in the Appendix summarizes corpus-level statistics of the $\mathcal{ART}$ dataset.5 Figure 4 shows some illustrative examples from $\mathcal{ART}$ (dev split). The best model based on BERT fails to correctly predict the first two dev examples.
106
+
107
+ ![](images/a1e03989843e73faa346f29575473a9ceca04d694923810cc8ee971cf73622e1.jpg)
108
+ Figure 4: Examples from ART(dev split). The best model based on BERT fails to correctly predict the first two examples.
109
+
110
+ Collecting Observations: The pairs $O_1, O_2$ in $\mathcal{ART}$ are drawn from the ROCStories dataset (Mostafazadeh et al., 2016). ROCStories is a large collection of short, manually curated five-sentence stories. It was designed to have a clear beginning and ending for each story, which naturally map to the first $(O_1)$ and second $(O_2)$ observations in $\mathcal{ART}$ .
111
+
112
+ Collecting Hypotheses Options: We crowdsourced the plausible and implausible hypotheses options on Amazon Mechanical Turk (AMT) in two separate tasks<sup>6</sup>:
113
+
114
+ 1. Plausible Hypothesis Options: We presented $O_{1}$ and $O_{2}$ as narrative context to crowdworkers who were prompted to fill in "What happened in-between?" in natural language. The design of the task motivates the use of abductive reasoning to hypothesize likely explanations for the two given observations.
115
+ 2. Implausible Hypothesis Options: In this task, we presented workers with observations $O_{1}$ , $O_{2}$ and one plausible hypothesis option $h^{+} \in \mathcal{H}^{+}$ collected from the previous task. Crowdworkers were instructed to make minimal edits (up to 5 words) to a given $h^{+}$ to create implausible hypothesis variations for each plausible hypothesis.
116
+
117
+ A significant challenge in creating datasets is avoiding annotation artifacts – unintentional patterns in the data that leak information about the target label – that several recent studies (Gururangan et al., 2018; Poliak et al., 2018; Tsuchiya, 2018) have reported on crowdsourced datasets. To tackle this challenge, we collect multiple plausible and implausible hypotheses for each $\langle O_1, O_2 \rangle$ pair (as described above) and then apply an adversarial filtering algorithm to retain one challenging pair of hypotheses that are hard to distinguish between. We describe our algorithm in detail in Appendix A.5. While our final dataset uses BERT as the adversary, preliminary experiments that used GPT as an adversary resulted in similar drops in performance of all models, including all BERT variants. We compare the results of the two adversaries in Table 1.
118
+
119
+ # 5 EXPERIMENTS AND RESULTS
120
+
121
+ We now present our evaluation of finetuned state-of-the-art pre-trained language models on the $\mathcal{A}\mathcal{R}\mathcal{T}$ dataset, and several other baseline systems for both $\alpha$ NLI and $\alpha$ NLG. Since $\alpha$ NLI is framed as a binary classification problem, we choose accuracy as our primary metric. For $\alpha$ NLG, we report performance on automated metrics such as BLEU (Papineni et al., 2002), CIDEr (Vedantam et al., 2015), METEOR (Banerjee & Lavie, 2005) and also report human evaluation results.
122
+
123
+ # 5.1 ABDUCTIVE NATURAL LANGUAGE INFERENCE
124
+
125
+ Despite strong performance on several other NLP benchmark datasets, the best baseline model based on BERT achieves an accuracy of just $68.9\%$ on $\mathcal{ART}$ compared to human performance of $91.4\%$ . The large gap between human performance and that of the best system provides significant scope for development of more sophisticated abductive reasoning models. Our experiments show that introducing the additional independence assumptions described in Section 3.1 over the fully connected model tends to degrade system performance (see Table 1) in general.
126
+
127
+ Human Performance We compute human performance using AMT. Each instance (two observations and two hypothesis choices) is shown to three workers who were prompted to choose the more plausible hypothesis choice. We compute majority vote on the labels assigned which leads to a human accuracy of $91.4\%$ on the
128
+
129
+ <table><tr><td>Model</td><td>GPT AF Acc. (%)</td><td>ART Acc. (%)</td></tr><tr><td>Random (2-way choice)</td><td>50.1</td><td>50.4</td></tr><tr><td>Majority (from dev set)</td><td>50.1</td><td>50.8</td></tr><tr><td>InferSent (Conneau et al., 2017)</td><td>50.9</td><td>50.8</td></tr><tr><td>ESIM+ELMo (Chen et al., 2017)</td><td>58.2</td><td>58.8</td></tr><tr><td colspan="3">Finetuning Pre-trained LMs</td></tr><tr><td>GPT-ft</td><td>52.6 (0.9)</td><td>63.1 (0.5)</td></tr><tr><td>BERT-ft [h^i Only]</td><td>55.9 (0.7)</td><td>59.5 (0.2)</td></tr><tr><td>BERT-ft [O1 Only]</td><td>63.9 (0.8)</td><td>63.5 (0.7)</td></tr><tr><td>BERT-ft [O2 Only]</td><td>68.1 (0.6)</td><td>66.6 (0.2)</td></tr><tr><td>BERT-ft [Linear Chain]</td><td>65.3 (1.4)</td><td>68.9 (0.5)</td></tr><tr><td>BERT-ft [Fully Connected]</td><td>72.0 (0.5)</td><td>68.6 (0.5)</td></tr><tr><td>Human Performance</td><td>-</td><td>91.4</td></tr></table>
130
+
131
+ Table 1: Performance of baselines and finetuned-LM approaches on the test set of $\mathcal{A}\mathcal{R}\mathcal{T}$ . Test accuracy is reported as the mean of five models trained with random seeds, with the standard deviation in parenthesis.
132
+
133
+ $\mathcal{ART}$ test set.
134
+
135
+ Baselines We include baselines that rely on simple features to verify that $\mathcal{A}\mathcal{R}\mathcal{T}$ is not trivially solvable due to noticeable annotation artifacts, observed in several crowdsourced datasets. The accuracies of all simple baselines are close to chance-performance on the task – indicating that the dataset is free of simple annotation artifacts.
136
+
137
+ A model for the related but distinct task of entailment NLI (e.g. SNLI) forms a natural baseline for $\alpha$ NLI. We re-train the ESIM+ELMo (Chen et al., 2017; Peters et al., 2018) model as its performance on entailment NLI (88.9%) is close to state-of-the-art models (excluding pre-trained language models). This model only achieves an accuracy of $58.8\%$ highlighting that performing well on $\mathcal{ART}$ requires models to go far beyond the linguistic notion of entailment.
138
+
139
+ Pre-trained Language Models BERT (Devlin et al., 2018) and GPT (Radford, 2018) have recently been shown to achieve state-of-the-art results on several NLP benchmarks (Wang et al., 2018). We fine-tune both BERT-Large and GPT as suggested in previous work and we present each instance in their natural narrative order. BERT-ft (fully connected) is the best performing model achieving $68.9\%$ accuracy, compared to GPT's $63.1\%$ . Our AF approach was able to reduce BERT performance from over $88\%$ by 20 points.
140
+
141
+ ![](images/b697c8c3df697ed931ff891c42952405037e07677acccdfe3b2777fff53598b0.jpg)
142
+ Figure 5: BERT learning curve on the dev set of $\mathcal{A}\mathcal{R}\mathcal{T}$ . For each point on the x-axis, we fine-tune BERT with five random seeds. Human performance is $91.4\%$ .
143
+
144
+ Learning Curve and Dataset Size While there is enough scope for considerably scaling up the dataset based on ROCStories, the learning curve in Figure 5 shows that the performance of the best model plateaus after $\sim 10,000$ instances.
145
+
146
+ In addition, there is still a wide gap $(\sim 23\%)$ between the performance of the best model and human performance.
147
+
148
+ <table><tr><td>Model</td><td>BLEU</td><td>METEOR</td><td>ROUGE</td><td>CIDEr</td><td>BERT-Score</td><td>Human</td></tr><tr><td>GPT2-Fixed</td><td>0.0</td><td>9.29</td><td>9.99</td><td>3.34</td><td>36.69</td><td>-</td></tr><tr><td>O1-O2-Only</td><td>2.23</td><td>16.71</td><td>22.83</td><td>33.54</td><td>48.74</td><td>42.26</td></tr><tr><td>COMeT-Txt+GPT2</td><td>2.29</td><td>16.73</td><td>22.51</td><td>31.99</td><td>48.46</td><td>38.28</td></tr><tr><td>COMeT-Emb+GPT2</td><td>3.03</td><td>17.66</td><td>22.93</td><td>32.00</td><td>48.52</td><td>44.56</td></tr><tr><td>Human-written Hypotheses</td><td>8.25</td><td>26.71</td><td>30.40</td><td>53.56</td><td>53.30</td><td>96.03</td></tr></table>
149
+
150
+ Table 2: Performance of generative models on the test set of $\mathcal{A}\mathcal{R}\mathcal{T}$ . All models except GPT2-Fixed are finetuned on $\mathcal{A}\mathcal{R}\mathcal{T}$ .
151
+
152
+ GPT Adversary Table 1 also includes results of our experiments where GPT was used as the adversary. Notably, in this case, adversarially filtering the dataset brings down GPT performance under $53\%$ . On the other hand, the best BERT model, that encodes the fully connected bayesian network performs significantly better than the BERT model that encodes the linear chain assumptions $-72\%$ compared to $65\%$ . Therefore, we use the BERT fully connected model as the adversary in $\mathcal{ART}$ . The gap between the linear chain and fully connected BERT models diminishes when BERT is used as an adversary – in spite of being a more powerful model – which indicates that adversarial filtering disproportionately impacts the model used as the adversary. However, the dataset also becomes more difficult for the other models that were not used as adversaries. For example, before any filtering, BERT scores $88\%$ and OpenGPT gets $80\%$ , which is much higher than either model achieves in Table 1 when the other model is used for filtering. This result is a reasonable indicator, albeit not a guarantee, that $\mathcal{ART}$ will remain challenging for new models released in the future.
153
+
154
+ # 5.2 ABDUCTIVE NATURAL LANGUAGE GENERATION
155
+
156
+ Generative Language Models As described in Equation 4, we train GPT2 conditioned on the tokens of the two observations $O_{1}$ and $O_{2}$ . Both observations are enclosed with field-specific tags. ATOMIC (Sap et al., 2019), a repository of inferential if-then knowledge is a natural source of background commonsense required to reason about narrative contexts in $\mathcal{ART}$ . Yet, there is no straightforward way to include such knowledge into a neural model as ATOMIC's nodes are not canonicalized and are represented as short phrases of text. Thus, we rely on COMeT - a transformer model trained on ATOMIC that generates nine commonsense inferences of events in natural language. Specifically, we experiment with two ways of integrating information from COMeT in GPT2: (i) as textual phrases, and (ii) as embeddings.
157
+
158
+ Figure 3 shows how we integrate COMeT representations. Concretely, after the input tokens are embedded by the word-embedding layer, we append eighteen (corresponding to nine relations for each observation) embeddings to the sequence before passing through the layers of the Transformer architecture. This allows the model to learn each token's representation while attending to the COMeT embeddings – effectively integrating background commonsense knowledge into a language model.[10]
159
+
160
+ Discussion Table 2 reports results on the $\alpha$ NLG task. Among automatic metrics, we report BLEU-4 (Papineni et al., 2002), METEOR (Banerjee & Lavie, 2005), ROUGE (Lin, 2004), CIDEr (Vedantam et al., 2015) and BERT-Score (Zhang et al., 2019) (with the bert-base-uncased model). We establish human performance through crowdsourcing on AMT. Crowdworkers are shown pairs of observations and a generated hypothesis and asked to label whether the hypothesis explains the given observations. The last column reports the human evaluation score. The last row reports the score of a held-out human-written hypothesis and serves as a ceiling for model performance. Human-written hypotheses are found to be correct for $96\%$ of instances, while our best generative models, even when enhanced with background commonsense knowledge, only achieve $45\%$ - indicating that the $\alpha$ NLG generation task is especially challenging for current state-of-the-art text generators.
161
+
162
+ # 6 ANALYSIS
163
+
164
+ # 6.1 $\alpha$ NLI
165
+
166
+ Commonsense reasoning categories We investigate the categories of commonsense-based abductive reasoning that are challenging for current systems and the ones where the best model over-performs. While there have been previous attempts to categorize commonsense knowledge required for entailment (LoBue & Yates, 2011; Clark et al., 2007), crowdsourcing this task at scale with high fidelity and high agreement across annotators remains challenging. Instead, we aim to probe the model with soft categories identified by matching lists of category-specific keywords to the hypothesis choices.
167
+
168
+ <table><tr><td>Category</td><td>Human Accuracy</td><td>BERT Accuracy</td><td>Δ</td></tr><tr><td>All (1,000)</td><td>91.4</td><td>68.8</td><td>22.6</td></tr><tr><td>Numerical (44)</td><td>88.6</td><td>56.8</td><td>31.8</td></tr><tr><td>Spatial (130)</td><td>91.5</td><td>65.4</td><td>26.1</td></tr><tr><td>Emotional (84)</td><td>86.9</td><td>72.6</td><td>14.3</td></tr></table>
169
+
170
+ Table 3 shows the accuracy of the best model (BERT-ft) across various categories of commonsense knowledge. BERT-ft significantly underperforms on instances involving Numerical $(56.8\%)$ and Spatial $(65.4\%)$ commonsense. These two categories include reasoning about numerical quantities and the spatial location of agents and objects, and highlight some of the limitations of the language models. In contrast, it significantly overperforms on the Emotional category $(72.6\%)$ where the hypotheses exhibit strong textual cues about emotions and sentiments.
171
+
172
+ Implausible transitions A model for an instance of the $\mathcal{ART}$ dataset should discard implausible hypotheses in the context of the two given observations. In narrative contexts, there are three main reasons for an implausible hypothesis to be labeled as such:
173
+
174
+ 1. $O_{1} \nrightarrow h^{-}$ : $h^{-}$ is unlikely to follow after the first observation $O_{1}$ .
175
+ 2. $h^{-} \nrightarrow O_{2}$ : $h^{-}$ is plausible after $O_{1}$ but unlikely to precede the second observation $O_{2}$ .
176
+ 3. Plausible: $\langle O_1, h^-, O_2 \rangle$ is a coherent narrative and forms a plausible alternative, but it is less plausible than $\langle O_1, h^+, O_2 \rangle$ .
177
+
178
+ Table 3: BERT's performance and human evaluation on categories for 1,000 instances from the test set, based on commonsense reasoning domains (Numerical, Spatial, Emotional). The number in parenthesis indicates the size of the category.
179
+
180
+ <table><tr><td>Story Transition</td><td>% of Dataset</td><td>BERT-ft Fully Connected Acc. (%)</td><td>BERT-ft Linear Chain Acc. (%)</td></tr><tr><td>O1→h-</td><td>32.5</td><td>73.6</td><td>71.6</td></tr><tr><td>h-→O2</td><td>45.3</td><td>69.0</td><td>70.5</td></tr><tr><td>Plausible</td><td>22.2</td><td>62.5</td><td>58.5</td></tr><tr><td>All (1,000)</td><td>100.0</td><td>69.1</td><td>68.2</td></tr></table>
181
+
182
+ Table 4: Fraction of dataset for which a particular transition in the story is broken for the negative hypothesis, for 1,000 random instances from the test set.
183
+
184
+ We analyze the prevalence of each of these reasons in $\mathcal{ART}$ . We design a crowdsourcing task in which we show the implausible option along with the narrative context $\langle O_1, O_2 \rangle$ and get labels for which transition $(O_1 \nRightarrow h^-, h^-\nRightarrow O_2$ or neither) in the narrative chain is broken. Table 4 shows the proportion of each category from a subset of 1,000 instances from the test set. While $h^-\nRightarrow O_2$ accounts for almost half of the implausible transitions in $\mathcal{ART}$ , all three categories are substantially present in the dataset. BERT performance on each of these categories indicates that the model finds it particularly hard when the narrative created by the incorrect hypothesis is plausible, but less plausible than the correct hypothesis. On that subset of the test set, the fully connected model performs better than the linear chain model where it is important to consider both observations jointly to arrive at the more likely hypothesis.
185
+
186
+ # 6.2 $\alpha$ NLG
187
+
188
+ Figure 6 shows some examples of generations from the trained models compared to human-written generations. The example on the left is an instance that only humans could get correct, while for the one on the right, COMeT-Emb+GPT2 also generates the correct explanation for the observations.
189
+
190
+ ![](images/b28b242b0d221c0278613bf7848f8da5a0171856b671600449061d921a6c2770.jpg)
191
+ Figure 6: Examples of generated hypotheses from different models and human-written hypotheses for 2 instances from $\mathcal{ART}$ .
192
+
193
+ # 7 TRANSFER LEARNING FROM ART
194
+
195
+ $\mathcal{ART}$ contains a large number of questions for the novel abductive reasoning task. In addition to serving as a benchmark, we investigate if $\mathcal{ART}$ can be used as a resource to boost performance on other commonsense tasks. We apply transfer learning by first training a model on $\mathcal{ART}$ , and subsequently training on four target datasets – WinoGrande Sakaguchi et al. (2020), WSC Levesque et al. (2011), DPR Rahman & Ng (2012) and HellaSwag Zellers et al. (2019). We show that compared to a model that is only trained on the target dataset, a model that is sequentially trained on $\mathcal{ART}$ first and then on the target dataset can perform better. In particular, pre-training on $\mathcal{ART}$ consistently improves performance on related datasets when they have relatively few training examples.
196
+
197
+ On the other hand, for target datasets with large amounts of training data, pre-training on $\mathcal{ART}$ does not provide a significant improvement.
198
+
199
+ # 8 RELATED WORK
200
+
201
+ Cloze-Style Task vs. Abductive Reasoning Since abduction is fundamentally concerned with plausible chains of cause-and-effect, our work draws inspiration from previous works that deal with narratives such as script learning
202
+
203
+ (Schank & Abelson, 1975) and the narrative cloze test (Chambers & Jurafsky, 2009; Jans et al., 2012; Pichotta & Mooney, 2014; Rudinger et al., 2015). Rather than learning prototypical scripts or narrative chains, we instead reason about the most plausible events conditioned on observations. We make use of the ROCStories dataset (Mostafazadeh et al., 2016), which was specifically designed for the narrative cloze task. But, instead of reasoning about plausible event sequences, our task requires reasoning about plausible explanations for narrative omissions.
204
+
205
+ <table><tr><td>Dataset</td><td>BERT-ft(D)</td><td>BERT-ft(ART)→ BERT-ft(D)</td></tr><tr><td>WinoGrande</td><td>65.8%</td><td>67.2%</td></tr><tr><td>Sakaguchi et al. (2020)</td><td></td><td></td></tr><tr><td>WSC</td><td>70.0%</td><td>74.0%</td></tr><tr><td>Levesque et al. (2011)</td><td></td><td></td></tr><tr><td>DPR</td><td>72.5%</td><td>86.0%</td></tr><tr><td>Rahman &amp; Ng (2012)</td><td></td><td></td></tr><tr><td>Hellaswag</td><td>46.7%</td><td>46.1%</td></tr><tr><td>Zellers et al. (2019)</td><td></td><td></td></tr></table>
206
+
207
+ Table 5: Transfer Learning from $\mathcal{ART}$
208
+
209
+ Entailment vs. Abductive Reasoning The formulation of $\alpha$ NLI is closely related to entailment NLI, but there are two critical distinctions that make abductive reasoning uniquely challenging. First, abduction requires reasoning about commonsense implications of observations (e.g., if we observe that the "grass is wet", a likely hypothesis is that "it rained earlier") which go beyond the linguistic notion of entailment (also noted by Josephson (2000)). Second, abduction requires non-monotonic reasoning about a set of commonsense implications collectively, to check the potential contradictions against multiple observations and to compare the level of plausibility of different hypotheses. This makes abductive reasoning distinctly challenging compared to other forms of reasoning such as induction and deduction (Shank, 1998). Perhaps more importantly, abduction is closely related to the kind of reasoning humans perform in everyday situations, where information is incomplete and definite inferences cannot be made.
210
+
211
+ Generative Language Modeling Recent advancements in the development of large-scale pretrained language models (Radford, 2018; Devlin et al., 2018; Radford et al., 2019) have improved the quality and coherence of generated language. Although these models have been shown to generate reasonably coherent text when conditioned on a sequence of text, our experiments highlight the limitations of these models to 1) generate language non-monotonically and 2) adhere to commonsense knowledge. We attempt to overcome these limitations with the incorporation of a generative commonsense model during hypothesis generation.
212
+
213
+ Related Datasets Our new resource ART complements ongoing efforts in building resources for natural language inference (Dagan et al., 2006; MacCartney & Manning, 2009; Bowman et al., 2015; Williams et al., 2018a; Camburu et al., 2018). Existing datasets have mostly focused on textual entailment in a deductive reasoning set-up (Bowman et al., 2015; Williams et al., 2018a) and making inferences about plausible events (Maslan et al., 2015; Zhang et al., 2017). In their typical setting, these datasets require a system to deduce the logically entailed consequences of a given premise. In contrast, the nature of abduction requires the use of commonsense reasoning capabilities, with less focus on lexical entailment. While abductive reasoning has been applied to entailment datasets (Raina et al., 2005), they have been applied in a logical theorem-proving framework as an intermediate step to perform textual entailment – a fundamentally different task than $\alpha$ NLI.
214
+
215
+ # 9 CONCLUSION
216
+
217
+ We present the first study that investigates the viability of language-based abductive reasoning. We conceptualize and introduce Abductive Natural Language Inference $(\alpha \mathrm{NLI})$ - a novel task focused on abductive reasoning in narrative contexts. The task is formulated as a multiple-choice question-answering problem. We also introduce Abductive Natural Language Generation $(\alpha \mathrm{NLG})$ - a novel task that requires machines to generate plausible hypotheses for given observations. To support these tasks, we create and introduce a new challenge dataset, $\mathcal{A}\mathcal{R}\mathcal{T}$ , which consists of 20,000 commonsense narratives accompanied with over 200,000 explanatory hypotheses. In our experiments, we establish comprehensive baseline performance on this new task based on state-of-the-art NLI and language models, which leads to $68.9\%$ accuracy with a considerable gap with human performance $(91.4\%)$ . The $\alpha \mathrm{NLG}$ task is significantly harder - while humans can write a valid explanation $96\%$ of times, the best generator models can only achieve $45\%$ . Our analysis leads to new insights into the types of reasoning that deep pre-trained language models fail to perform - despite their strong performance on the closely related but different task of entailment NLI - pointing to interesting avenues for future research. We hope that $\mathcal{A}\mathcal{R}\mathcal{T}$ will serve as a challenging benchmark for future research in language-based abductive reasoning and the $\alpha \mathrm{NLI}$ and $\alpha \mathrm{NLG}$ tasks will encourage representation learning that enables complex reasoning capabilities in AI systems.
218
+
219
+ # ACKNOWLEDGMENTS
220
+
221
+ We thank the anonymous reviewers for their insightful feedback. This research was supported in part by NSF (IIS-1524371), the National Science Foundation Graduate Research Fellowship under Grant No. DGE 1256082, DARPA CwC through ARO (W911NF15-1-0543), DARPA MCS program through NIWC Pacific (N66001-19-2-4031), and the Allen Institute for AI. Computations on beaker.org were supported in part by credits from Google Cloud.
222
+
223
+ # REFERENCES
224
+
225
+ Henning Andersen. Abductive and deductive change. Language, pp. 765-793, 1973. URL https://www.jstor.org/stable/pdf/412063.pdf.
226
+ Satanjeev Banerjee and Alon Lavie. Meteor: An automatic metric for mt evaluation with improved correlation with human judgments. In Proceedings of the acl workshop on intrinsic and extrinsic evaluation measures for machine translation and/or summarization, pp. 65-72, 2005.
227
+
228
+ Antoine Bosselut, Hannah Rashkin, Maarten Sap, Chaitanya Malaviya, Asli Celikyilmaz, and Yejin Choi. Comet: Commonsense transformers for automatic knowledge graph construction. arXiv preprint arXiv:1906.05317, 2019.
229
+ Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics, 2015. URL https://nlp.stanford.edu/pubs/snli_paper.pdf.
230
+ Oana-Maria Camburu, Tim Rocktäschel, Thomas Lukasiewicz, and Phil Blunsom. e-snli: Natural language inference with natural language explanations. In Advances in Neural Information Processing Systems, pp. 9560-9572, 2018. URL https://papers.nips.cc/paper/8163-e-snli-natural-language-inference-with-natural-language-explanations.pdf.
231
+ Nathanael Chambers and Dan Jurafsky. Unsupervised learning of narrative schemas and their participants. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International Joint Conference on Natural Language Processing of the AFNLP, pp. 602-610, Suntec, Singapore, August 2009. Association for Computational Linguistics. URL http://www.aclweb.org/anthology/P/P09/P09-1068.
232
+ Eugene Charniak and Solomon Eyal Shimony. Probabilistic semantics for cost based abduction. Brown University, Department of Computer Science, 1990. URL https://www.aaaai.org/Papers/AAAI/1990/AAAI90-016.pdf.
233
+ Qian Chen, Xiao-Dan Zhu, Zhen-Hua Ling, Si Wei, Hui Jiang, and Diana Inkpen. Enhanced LSTM for natural language inference. In ACL, 2017. URL https://www.aclweb.org/anthology/P17-1152.
234
+ Peter E. Clark, Philip Harrison, John A. Thompson, William R. Murray, Jerry R. Hobbs, and Christiane Fellbaum. On the role of lexical and world knowledge in rte3. In ACL-PASCAL@ACL, 2007. URL https://www.aclweb.org/anthology/W07-1409.
235
+ Alexis Conneau, Douwe Kiela, Holger Schwenk, Loic Barrault, and Antoine Bordes. Supervised learning of universal sentence representations from natural language inference data. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 670-680, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/D17-1070. URL https://www.aclweb.org/anthology/D17-1070.
236
+ Ido Dagan, Oren Glickman, and Bernardo Magnini. The Pascal recognising textual entailment challenge. In Machine learning challenges: evaluating predictive uncertainty, visual object classification, and recognising textual entailment, pp. 177-190. Springer, 2006. URL http://u.cs.biu.ac.il/~dagan/publications/RTEChallenge.pdf.
237
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. URL https://arxiv.org/abs/1810.04805.
238
+ Suchin Gururangan, Swabha Swayamdipta, Omer Levy, Roy Schwartz, Samuel Bowman, and Noah A. Smith. Annotation artifacts in natural language inference data. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pp. 107-112, New Orleans, Louisiana, June 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-2017. URL https://www.aclweb.org/anthology/N18-2017.
239
+ Jerry R. Hobbs, Mark Stickel, Paul Martin, and Douglas Edwards. Interpretation as abduction. In Proceedings of the 26th Annual Meeting of the Association for Computational Linguistics, pp. 95-103, Buffalo, New York, USA, June 1988. Association for Computational Linguistics. doi: 10.3115/982023.982035. URL https://www.aclweb.org/anthology/P88-1012.
240
+ Bram Jans, Steven Bethard, Ivan Vulic, and Marie-Francine Moens. Skip n-grams and ranking functions for predicting script events. In Proceedings of the 13th Conference of the European Chapter
241
+
242
+ of the Association for Computational Linguistics, pp. 336-344, Avignon, France, April 2012. Association for Computational Linguistics. URL http://www.aclweb.org/anthology/E12-1034.
243
+ Susan G. Josephson. Abductive inference: Computation, philosophy, technology. 2000. URL https://philpapers.org/rec/JOSAIC.
244
+ George Lakoff. Linguistics and natural logic. Synthese, 22(1-2):151-271, 1970. URL https://link.springer.com/article/10.1007/BF00413602.
245
+ Hector J. Levesque, Ernest Davis, and Leora Morgenstern. The winograd schema challenge. In $KR$ , 2011.
246
+ Chin-Yew Lin. Rouge: A package for automatic evaluation of summaries. Text Summarization Branches Out, 2004.
247
+ Peter LoBue and Alexander Yates. Types of common-sense knowledge needed for recognizing textual entailment. In ACL, 2011. URL https://www.aclweb.org/anthology/P11-2057.
248
+ Bill MacCartney and Christopher D. Manning. Natural logic for textual inference. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 193-200, Prague, June 2007. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/W07-1431.
249
+ Bill MacCartney and Christopher D. Manning. An extended model of natural logic. In Proceedings of the Eight International Conference on Computational Semantics, pp. 140-156, Tilburg, The Netherlands, January 2009. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/W09-3714.
250
+ Nicole Maslan, Melissa Roemmele, and Andrew S. Gordon. One hundred challenge problems for logical formalizations of commonsense psychology. In AAAI Spring Symposia, 2015. URL http://people.ict.usc.edu/~gordon/publications/AAAI-SPRING15.PDF.
251
+ Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL), pp. 839-849. Association for Computational Linguistics, 2016. doi: 10.18653/v1/N16-1098. URL http://aclweb.org/anthology/N16-1098.
252
+ Peter Norvig. Inference in text understanding. In AAAI, pp. 561-565, 1987. URL http://norvig.com/aaai87.pdf.
253
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In ACL, 2002.
254
+ Judea Pearl. Reasoning with cause and effect. AI Magazine, 23(1):95, 2002. URL https://ftp.cs.ucla.edu/pub/stat_ser/r265-ai-mag.pdf.
255
+ Judea Pearl and Dana Mackenzie. The Book of Why: The New Science of Cause and Effect. Basic Books, Inc., New York, NY, USA, 1st edition, 2018. ISBN 046509760X, 9780465097609. URL https://dl.acm.org/citation.cfm?id=3238230.
256
+ Charles Sanders Peirce. Collected papers of Charles Sanders Peirce, volume 5. Harvard University Press, 1965a. URL http://www.hup.harvard.edu/catalog.php?isbn=9780674138001.
257
+ Charles Sanders Peirce. *Pragmatism and pragmaticism*, volume 5. Belknap Press of Harvard University Press, 1965b. URL https://www.jstor.org/stable/224970.
258
+
259
+ Jeffrey Pennington, Richard Socher, and Christopher Manning. Glove: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://www.aclweb.org/anthology/D14-1162.
260
+ Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, June 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://www.aclweb.org/anthology/N18-1202.
261
+ Karl Pichotta and Raymond Mooney. Statistical script learning with multi-argument events. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics, pp. 220–229, Gothenburg, Sweden, April 2014. Association for Computational Linguistics. URL http://www.aclweb.org/anthology/E14-1024.
262
+ Adam Poliak, Jason Naradowsky, Aparajita Haldar, Rachel Rudinger, and Benjamin Van Durme. Hypothesis only baselines in natural language inference. In Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics, pp. 180-191, New Orleans, Louisiana, June 2018. Association for Computational Linguistics. doi: 10.18653/v1/S18-2023. URL https://www.aclweb.org/anthology/S18-2023.
263
+ Alec Radford. Improving language understanding by generative pre-training. 2018.
264
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. OpenAI Blog, 1(8), 2019.
265
+ Altaf Rahman and Vincent Ng. Resolving complex cases of definite pronouns: The winograd schema challenge. In EMNLP-CoNLL, 2012.
266
+ Rajat Raina, Andrew Y Ng, and Christopher D Manning. Robust textual inference via learning and abductive reasoning. In AAAI, pp. 1099-1105, 2005. URL https://nlp.stanford.edu/~manning/papers/aaai05-learnabduction.pdf.
267
+ Rachel Rudinger, Pushpendre Rastogi, Francis Ferraro, and Benjamin Van Durme. Script induction as language modeling. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pp. 1681-1686, Lisbon, Portugal, September 2015. Association for Computational Linguistics. URL http://aclweb.org/anthology/D15-1195.
268
+ Keisuke Sakaguchi, Ronan Le Bras, Chandra Bhagavatula, and Yejin Choi. Winogrande: An adversarial winograd schema challenge at scale. In AAAI, 2020.
269
+ Maarten Sap, Ronan Le Bras, Emily Allaway, Chandra Bhagavatula, Nicholas Lourie, Hannah Rashkin, Brendan Roof, Noah A Smith, and Yejin Choi. Atomic: an atlas of machine commonsense for if-then reasoning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 3027-3035, 2019.
270
+ Roger C. Schank and Robert P. Abelson. Scripts, plans, and knowledge. In Proceedings of the 4th International Joint Conference on Artificial Intelligence - Volume 1, IJCAI'75, pp. 151-157, San Francisco, CA, USA, 1975. Morgan Kaufmann Publishers Inc. URL http://dl.acm.org/citation.cfm?id=1624626.1624649.
271
+ Gary Shank. The extraordinary ordinary powers of abductive reasoning. Theory & Psychology, 8(6):841-860, 1998. URL https://journals.sagepub.com/doi/10.1177/0959354398086007.
272
+ Masatoshi Tsuchiya. Performance impact caused by hidden bias of training data for recognizing textual entailment. CoRR, abs/1804.08117, 2018. URL http://www.lrec-conf.org/proceedings/lrec2018/pdf/786.pdf.
273
+
274
+ Ramakrishna Vedantam, C Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 4566-4575, 2015.
275
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pp. 353-355, Brussels, Belgium, November 2018. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/W18-5446.
276
+ Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume I (Long Papers), pp. 1112-1122. Association for Computational Linguistics, 2018a. URL http://aclweb.org/anthology/N18-1101.
277
+ Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112-1122, New Orleans, Louisiana, June 2018b. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://www.aclweb.org/anthology/N18-1101.
278
+ Rowan Zellers, Yonatan Bisk, Roy Schwartz, and Yejin Choi. Swag: A large-scale adversarial dataset for grounded commonsense inference. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2018. URL https://aclweb.org/anthology/D18-1009.
279
+ Rowan Zellers, Ari Holtzman, Yonatan Bisk, Ali Farhadi, and Yejin Choi. Hellaswag: Can a machine really finish your sentence? In ACL, 2019.
280
+ Sheng Zhang, Rachel Rudinger, Kevin Duh, and Benjamin Van Durme. Ordinal common-sense inference. Transactions of the Association for Computational Linguistics, 5:379-395, 2017. doi: 10.1162/tacl_a_00068. URL https://www.aclweb.org/anthology/Q17-1027.
281
+ Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q Weinberger, and Yoav Artzi. Bertscore: Evaluating text generation with bert. arXiv preprint arXiv:1904.09675, 2019.
282
+
283
+ # A APPENDICES
284
+
285
+ # A.1 DATA COLLECTION DETAILS
286
+
287
+ We describe the crowdsourcing details of our data collection method.
288
+
289
+ Task 1 - Plausible Hypothesis Options In this task, participants were presented an incomplete three-part story, which consisted of the first observation $(O_{1})$ and the second observation $(O_{2})$ of the story. They were then asked to complete the story by writing a probable middle sentence that explains why the second observation should follow after the first one. We instructed participants to make sure that the plausible middle sentence (1) is short (fewer than 10 words) and (2) simple as if narrating to a child, (3) avoids introducing any extraneous information, and (4) uses names instead of pronouns (e.g., he/she) wherever possible.
290
+
291
+ All participants were required to meet the following qualification requirements: (1) their location is in the US, (2) HIT approval rate is greater than $95\%$ , and (3) number of HITs approved is greater than 5,000. The reward of this task was set to be $0.07 per question ($14/hour on average), and each HIT was assigned to five different workers (i.e., 5-way redundancy).
292
+
293
+ Task 2 - Implausible Hypothesis Options In this task, participants were presented a three-part story, which consisted of the first observation $(O_1)$ , a middle sentence $(h^{+})$ collected in Task 1, and the second observation $(O_2)$ of the story. They were then asked to rewrite the middle sentence $(h^{+})$ with minimal changes, so that the story becomes unlikely, implausible or inconsistent $(h^{-})$ . We asked participants to add or remove at most four words to $h^{+}$ , while ensuring that the new middle sentence is grammatical. In addition, we asked them to stick to the context in the given story. For example, if the story talks about "doctors", they are welcome to talk about "health" or "diagnosis", but not mention "aliens". Finally, we also asked workers to verify if the given middle $(h^{+})$ makes a plausible story, in order to confirm the plausibility of $h^{+}$ collected in Task 1.
294
+
295
+ With respect to this task's qualification, participants were required to fulfill the following requirements: (1) their location is in the US or Canada, (2) HIT approval rate is greater than or equal to $99\%$ , and (3) number of HITs approved is greater than or equal to 10,000. Participants were paid $0.1 per question ($14/hour on average), and each HIT was assigned to three different participants (i.e., 3-way redundancy).
296
+
297
+ Task 3 - $\alpha$ NLI Human Performance Human performance was evaluated by asking participants to answer the $\alpha$ NLI questions. Given a narrative context $\langle O_1, O_2 \rangle$ and two hypotheses, they were asked to choose the more plausible hypothesis. They were also allowed to choose "None of the above" when neither hypothesis was deemed plausible.
298
+
299
+ We asked each question to seven participants with the following qualification requirements: (1) their location is either in the US, UK, or Canada, (2) HIT approval rate is greater than $98\%$ , (3) number of HITs approved is greater than 10,000. The reward was set to $\$0.05$ per HIT. We took the majority vote among the seven participants for every question to compute human performance.
300
+
301
+ # A.2 ART DATA STATISTICS
302
+
303
+ Table 6 shows some statistics of the ART dataset.
304
+
305
+ # A.3 FINE-TUNING BERT
306
+
307
+ We fine-tuned the BERT model using a grid search with the following set of hyper-parameters:
308
+
309
+ - batch size: $\{3, 4, 8\}$
310
+ number of epochs: $\{3,4,10\}$
311
+ learning rate: $\{1\mathrm{e} - 5,2\mathrm{e} - 5,3\mathrm{e} - 5,5\mathrm{e} - 5\}$
312
+
313
+ The warmup proportion was set to 0.2, and cross-entropy was used for computing the loss. The best performance was obtained with a batch size of 4, learning rate of 5e-5, and number of epochs equal to 10. Table 7 describes the input format for GPT and BERT (and its variants).
314
+
315
+ <table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td colspan="4">Total unique occurrences</td></tr><tr><td>Contexts ⟨O1,O2⟩</td><td>17,801</td><td>1,532</td><td>3,059</td></tr><tr><td>Plausible hyp. h+</td><td>72,046</td><td>1,532</td><td>3,059</td></tr><tr><td>Implausible hyp. h-</td><td>166,820</td><td>1,532</td><td>3,059</td></tr><tr><td colspan="4">Avg. size per context</td></tr><tr><td>Plausible hyp. h+</td><td>4.05</td><td>1</td><td>1</td></tr><tr><td>Implausible hyp. h-</td><td>9.37</td><td>1</td><td>1</td></tr><tr><td colspan="4">Avg. word length</td></tr><tr><td>Plausible hyp. h+</td><td>8.34</td><td>8.62</td><td>8.54</td></tr><tr><td>Implausible hyp. h-</td><td>8.28</td><td>8.55</td><td>8.53</td></tr><tr><td>First observation O1</td><td>8.09</td><td>8.07</td><td>8.17</td></tr><tr><td>Second observation O2</td><td>9.29</td><td>9.3</td><td>9.31</td></tr></table>
316
+
317
+ # A.4 BASELINES
318
+
319
+ The SVM classifier is trained on simple features like word length, overlap and sentiment features to select one of the two hypothesis choices. The bag-of-words baseline computes the average of GloVe (Pennington et al., 2014) embeddings for words in each sentence to form sentence embeddings. The sentence embeddings in a story (two observations and a hypothesis option) are concatenated and passed through fully-connected layers to produce a score for each hypothesis. The accuracies of both baselines are close to $50\%$ (SVM: 50.6; BOW: 50.5).
320
+
321
+ Specifically, we train an SVM classifier and a bag-of-words model using GloVe embeddings. Both models achieve accuracies close to $50\%$ . An InferSent (Conneau et al., 2017) baseline that uses sentences embedded by max-pooling over Bi-LSTM token representations achieves only $50.8\%$ accuracy.
322
+
323
+ Table 6: Some statistics summarizing the ART dataset. The train set includes all plausible and implausible hypotheses collected via crowdsourcing, while the dev and test sets include the hypotheses selected through the Adversarial Filtering algorithm.
324
+
325
+ <table><tr><td>Model</td><td>Input Format</td></tr><tr><td>GPT</td><td>[START] O1 + h i [SEP] O2 [SEP]</td></tr><tr><td>BERT-ft [Hypothesis Only]</td><td>[CLS] h i [SEP]</td></tr><tr><td>BERT-ft [First Observation Only]</td><td>[CLS] O1 [SEP] h i [SEP]</td></tr><tr><td>BERT-ft [Second Observation Only]</td><td>[CLS] h i [SEP] O2 [SEP]</td></tr><tr><td>BERT-ft [Linear Chain]</td><td>[CLS] O1 [SEP] h i [SEP] ; [CLS] h i [SEP] O2 [SEP]</td></tr><tr><td>BERT-ft [Fully Connected]</td><td>[CLS] O1 + O2 [SEP] h i [SEP]</td></tr></table>
326
+
327
+ Table 7: Input formats for GPT and BERT fine-tuning.
328
+
329
+ # A.5 ADVERSARIAL FILTERING OF HYPOTHESES CHOICES
330
+
331
+ Given an observation pair and sets of plausible and implausible hypotheses $\langle O_1, O_2, \mathcal{H}^+, \mathcal{H}^- \rangle$ , our adversarial filtering algorithm selects one plausible and one implausible hypothesis $\langle O_1, O_2, h^+, h^- \rangle$ such that $h^+$ and $h^-$ are hard to distinguish between. We make three key improvements over the previously proposed Adversarial Filtering (AF) approach in Zellers et al. (2018). First, instead of a single positive sample, we exploit a pool $\mathcal{H}^+$ of positive samples to choose from (i.e. plausible hypotheses). Second, instead of machine-generated distractors, the pool $\mathcal{H}^-$ of negative samples (i.e. implausible hypotheses) is human-generated. Thus, the distractors share stylistic features of the positive samples as well as that of the context (i.e. observations $O_1$ and $O_2$ ) – making the negative samples harder to distinguish from positive samples. Finally, we use BERT (Devlin et al., 2018) as
332
+
333
+ the adversary and introduce a temperature parameter that controls the maximum number of instances that can be modified in each iteration of AF. In later iterations, fewer instances get modified resulting in a smoother convergence of the AF algorithm (described in more detail below).
334
+
335
+ Algorithm 1 provides a formal description of our approach. In each iteration $i$ , we train an adversarial model $M_{i}$ on a random subset $\mathcal{T}_i$ of the data and update the validation set $\mathcal{V}_i$ to make it more challenging for $M_{i}$ . For a pair $(h_k^+, h_k^-)$ of plausible and implausible hypotheses for an instance $k$ , we denote $\delta = \Delta_{M_i}(h_k^+, h_k^-)$ the difference in the model evaluation of $h_k^+$ and $h_k^-$ . A positive value of $\delta$ indicates that the model $M_{i}$ favors the plausible hypothesis $h_k^+$ over the implausible one $h_k^-$ . With probability $t_i$ , we update instance $k$ that $M_{i}$ gets correct with a pair $(h^+, h^-) \in \mathcal{H}_k^+ \times \mathcal{H}_k^-$ of hypotheses that reduces the value of $\delta$ , where $\mathcal{H}_k^+$ (resp. $\mathcal{H}_k^-$ ) is the pool of plausible (resp. implausible) hypotheses for instance $k$ .
336
+
337
+ We ran AF for 50 iterations and the temperature $t_i$ follows a sigmoid function, parameterized by the iteration number, between $t_s = 1.0$ and $t_e = 0.2$ . Our final dataset, ART, is generated using BERT as the adversary in Algorithm 1.
338
+
339
+ Algorithm 1: Dual Adversarial Filtering
340
+ input: dataset $\mathcal{D}_0$ , plausible & implausible hypothesis sets $(\mathcal{H}^{+},\mathcal{H}^{-})$ , number of iterations $n$ initial & final temperatures $(t_s,t_e)$
341
+ output: dataset $\mathcal{D}_n$
342
+ 1 for iteration i: 0..n-1 do
343
+ 2 $t_i = t_e + \frac{t_s - t_e}{1 + e^{0.3(i - \frac{3n}{4})}}$
344
+ 3 Randomly partition $\mathcal{D}_i$ into $(\mathcal{T}_i,\mathcal{V}_i)$
345
+ 4 Train model $M_{i}$ on $\mathcal{T}_i$
346
+ 5 $\mathcal{S}_i = \emptyset$ , the selected hypotheses for $\mathcal{V}_i$
347
+ 6 for $(h_k^+,h_k^-)\in \mathcal{V}_i$ do
348
+ 7 Pick $r$ uniformly at random in [0, 1].
349
+ 8 if $r > t_i$ or $\Delta_{M_i}(h_k^+,h_k^-) < 0$ then
350
+ 9 Add $(h_k^+,h_k^-)$ to $S_{i}$
351
+ 10 else
352
+ 11 Pick $(h^{+},h^{-})\in \mathcal{H}_{k}^{+}\times \mathcal{H}_{k}^{-}$ s.t. $\Delta_{M_i}(h^+,h^-) < \Delta_{M_i}(h_k^+,h_k^-)$
353
+ 12 Add $(h^{+},h^{-})$ to $S_{i}$
354
+ 13 end
355
+ 14 end
356
+ 15 $\mathcal{D}_{i + 1} = \mathcal{T}_i\cup \mathcal{S}_i$
357
+ 16 end
358
+
359
+ # A.6 ATOMIC RELATIONS
360
+
361
+ ATOMIC (Sap et al., 2019) represents commonsense knowledge as a graph with events as nodes and the following nine relations as edges:
362
+
363
+ 1. xIntent: Why does X cause an event?
364
+ 2. xNeed: What does X need to do before the event?
365
+ 3. xAttr: How would X be described?
366
+ 4. xEffect: What effects does the event have on X?
367
+ 5. xWant: What would X likely want to do after the event?
368
+ 6. xReaction: How does X feel after the event?
369
+ 7. oReact: How do others feel after the event?
370
+ 8. oWant: What would others likely want to do after the event?
371
+ 9. oEffect: What effects does the event have on others?
372
+
373
+ <table><tr><td>Model</td><td>Input Format</td></tr><tr><td>GPT2-Fixed</td><td>w11...wn1w12...wn2Because,</td></tr><tr><td>O1-O2-Only</td><td>{o1}w11...wn1{/o1}{o2}w12...wn2{/o2}{h}</td></tr><tr><td>COMeT-Txt+GPT2</td><td>{p1}T11...T9{p9} {p1}T12...T9{p9} {o1}w11...wn1{/o1}{o2}w12...wn2{/o2}{h}</td></tr><tr><td>COMeT-Emb+GPT2</td><td>c11...c9; c12...c9{o1}w11...wn1{/o1}{o2}w12...wn2{/o2}{h}</td></tr></table>
374
+
375
+ Table 8: Input formats used for training and generating text from various GPT2-based models. $c_{i}^{j}$ refers to the COMeT embeddings obtained using a separate transformer model for relation $i$ and observation $j$ . Similarly, $T_{i}^{j}$ is the textual phrase for relation $i$ , observation $j$ . Where appropriate, field-specific start- and end-tags are added to the sequence of inputs.
376
+
377
+ # A.7 GENERATION MODELS INPUT FORMAT
378
+
379
+ Table 8 describes the format of input to each variation of the generative model evaluated.
abductivecommonsensereasoning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18c16634b385d7d3017a3332e79188396f307f7ab63b56a4b40ad7de8ada6344
3
+ size 500668
abductivecommonsensereasoning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:02c798bff20644760757ba81f741335614a1d53eebd56176e5eff7239e2169d7
3
+ size 637729
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be8881c0ef189a53efc5810aa85db1544f9d65098c790f55d3487bc55be760e9
3
+ size 93371
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fbfa9300c47a695c53a6fde5fc2a01810b3159e1b706d561d0f326f7ef1a35b
3
+ size 109812
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/296a869c-fc52-41ed-b434-88f35abef136_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:158240f2193bb8679b931e86a65004cbd54aa944959a3d0b5d35238cf67b60ee
3
+ size 1476097
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/full.md ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ABSTRACT DIAGRAMMATIC REASONING WITH MULTIPLEX GRAPH NETWORKS
2
+
3
+ Duo Wang & Mateja Jamnik & Pietro Lio
4
+
5
+ Department of Computer Science and Technology
6
+
7
+ University of Cambridge
8
+
9
+ Cambridge, United Kingdom
10
+
11
+ {Duo.Wang,Mateja.Jamnik,Pietro.Lio}@cl.cam.ac.uk
12
+
13
+ # ABSTRACT
14
+
15
+ Abstract reasoning, particularly in the visual domain, is a complex human ability, but it remains a challenging problem for artificial neural learning systems. In this work we propose MXGNet, a multilayer graph neural network for multi-panel diagrammatic reasoning tasks. MXGNet combines three powerful concepts, namely, object-level representation, graph neural networks and multiplex graphs, for solving visual reasoning tasks. MXGNet first extracts object-level representations for each element in all panels of the diagrams, and then forms a multi-layer multiplex graph capturing multiple relations between objects across different diagram panels. MXGNet summarises the multiple graphs extracted from the diagrams of the task, and uses this summarisation to pick the most probable answer from the given candidates. We have tested MXGNet on two types of diagrammatic reasoning tasks, namely Diagram Syllogisms and Raven Progressive Matrices (RPM). For an Euler Diagram Syllogism task MXGNet achieves state-of-the-art accuracy of $99.8\%$ . For PGM and RAVEN, two comprehensive datasets for RPM reasoning, MXGNet outperforms the state-of-the-art models by a considerable margin.
16
+
17
+ # 1 INTRODUCTION
18
+
19
+ Abstract reasoning has long been thought of as a key part of human intelligence, and a necessary component towards Artificial General Intelligence. When presented in complex scenes, humans can quickly identify elements across different scenes and infer relations between them. For example, when you are using a pile of different types of LEGO bricks to assemble a spaceship, you are actively inferring relations between each LEGO brick, such as in what ways they can fit together. This type of abstract reasoning, particularly in the visual domain, is a crucial key to human ability to build complex things.
20
+
21
+ Many tests have been proposed to measure human ability for abstract reasoning. The most popular test in the visual domain is the Raven Progressive Matrices (RPM) test (Raven (2000)). In the RPM test, the participants are asked to view a sequence of contextual diagrams, usually given as a $3 \times 3$ matrices of diagrams with the bottom-right diagram left blank. Participants should infer abstract relationships in rows or columns of the diagram, and pick from a set of candidate answers the correct one to fill in the blank. Figures 1 (a) show an example of RPM tasks containing XOR relations across diagrams in rows. More examples can be found in Appendix C. Another widely used test for measuring reasoning in psychology is Diagram Syllogism task (Sato et al. (2015)), where participants need to infer conclusions based on 2 given premises. Figure 1c shows an example of Euler Diagram Syllogism task.
22
+
23
+ Barrett et al. (2018) recently published a large and comprehensive RPM-style dataset named Procedurally Generated Matrices 'PGM', and proposed Wild Relation Network (WReN), a state-of-the-art neural net for
24
+
25
+ ![](images/720c91e539e2eb8f14c058bb200937745f0473ac255978ae6d0219e2c2711e64.jpg)
26
+ (a)
27
+
28
+ ![](images/c4edbceae5efa52378d5730b51d1e64b205dc01c9a4bfa0700dfe47abe3a8fee.jpg)
29
+ (b)
30
+
31
+ ![](images/ce23c8e00ec36feb9c55a1936ad92fee3dc8ad7b492cbebbe393f85ffea89343.jpg)
32
+ Figure 1: (a) shows an example of RPM tasks containing XOR relations across diagrams in rows and the overview of MXGNet architecture. Here $F_{\rho}$ is object representation module, $E_{\gamma}$ is edge embeddings module, $G_{\phi}$ is graph summarization module and $R_{\theta}$ is reasoning network. (b) shows an example of a multilayer graph formed from objects in the first row of diagrams in the example. (c) An example of syllogism represented in Euler diagrams.
33
+
34
+ ![](images/2c3bd4847908e8df54c82ffaaf26a0ce9957e284040ea7ad81bcb8b78ff61a9f.jpg)
35
+ (c)
36
+
37
+ ![](images/cd6e73ed9f8ebe75ea5a738ec25974906e7cc3275c08a4c938f0fb7648970444.jpg)
38
+
39
+ RPM-style tasks. While WReN outperforms other state-of-the-art vision models such as Residual Network He et al. (2016), the performance is still far from deep neural nets' performance on other vision or natural language processing tasks. Recently, there has been a focus on object-level representations (Yi et al. (2018); Hu et al. (2017); Hudson & Manning (2018); Mao et al. (2019); Teney et al. (2017); Zellers et al. (2018)) for visual reasoning tasks, which enable the use of inductive-biased architectures such as symbolic programs and scene graphs to directly capture relations between objects. For RPM-style tasks, symbolic programs are less suitable as these programs are generated from given questions in the Visual-Question Answering setting. In RPM-style tasks there are no explicit questions. Encoding RPM tasks into graphs is a more natural choice. However, previous works on scene graphs (Teney et al. (2017); Zellers et al. (2018)) model a single image as graphs, which is not suitable for RPM tasks as there are many different layers of relations across different subsets of diagrams in a single task.
40
+
41
+ In this paper we introduce MXGNet, a multi-layer multiplex graph neural net architecture for abstract diagram reasoning. Here 'Multi-layer' means the graphs are built across different diagram panels, where each diagram is a layer. 'Multiplex' means that edges of the graphs encode multiple relations between different element attributes, such as colour, shape and position. Multiplex networks are discussed in detail by Kao & Porter (2018). We first tested the application of multiplex graph on a Diagram Syllogism dataset (Wang et al. (2018a)), and confirmed that multiplex graph improves performance on the original model. For RPM task, MXGNet encodes subsets of diagram panels into multi-layer multiplex graphs, and combines summarisation of several graphs to predict the correct candidate answer. With a hierarchical summarisation scheme, each graph is summarised into feature embeddings representing relationships in the subset. These relation embeddings are then combined to predict the correct answer.
42
+
43
+ For PGM dataset (Barrett et al. (2018)), MXGNet outperforms WReN, the previous state-of-the-art model, by a considerable margin. For 'neutral' split of the dataset, MXGNet achieves $89.6\%$ test accuracy, $12.7\%$ higher than WReN's $76.9\%$ . For other splits MXGNet consistently performs better with smaller margins. For the RAVEN dataset (Zhang et al. (2019)), MXGNet, without any auxiliary training with additional labels, achieves $83.91\%$ test accuracy, outperforming $59.56\%$ accuracy by the best model with auxiliary training for the RAVEN dataset. We also show that MXGNet is robust to variations in forms of object-level representations. Both variants of MXGNet achieve higher test accuracies than existing best models for the two datasets.
44
+
45
+ # 2 RELATED WORK
46
+
47
+ Raven Progressive Matrices: Hoshen & Werman (2017) proposed a neural network model on Raven-style reasoning tasks that are a subset of complete RPM problems. Their model is based on Convolutional Network, and is demonstrated to be ineffective in complete RPM tasks (Barrett et al. (2018)). Mandziuk & Zychowski also experimented with an auto-encoder based neural net on simple single-shape RPM tasks. Barrett et al. (2018) built PGM, a complete RPM dataset, and proposed WReN, a neural network architecture based on Relation Network (Santoro et al. (2017)). Steenbrugge et al. (2018) replace CNN part of WReN with a pre-trained Variational Auto Encoder and slightly improved performance. Zhang et al. (2019) built RAVEN, a RPM-style dataset with structured labels of elements in the diagrams in the form of parsing trees, and proposed Dynamic Residual Trees, a simple tree neural network for learning with these additional structures. Anonymous (2020) applies Multi-head attention (Vaswani et al. (2017)), originally developed for Language model, on RPM tasks.
48
+
49
+ Visual Reasoning: RPM test falls in the broader category of visual reasoning. One widely explored task of visual reasoning is Visual Question Answering(VQA). Johnson et al. (2017) built CLEVR dataset, a VQA dataset that focuses on visual reasoning instead of information retrieval in traditional VQA datasets. Current leading approaches (Yi et al. (2018); Mao et al. (2019)) on CLEVR dataset generate synthetic programs using questions in the VQA setting, and use these programs to process object-level representations extracted with objection detection models (Ren et al. (2015)). This approach is not applicable to RPM-style problems as there is no explicit question present for program synthesis.
50
+
51
+ Graph Neural Networks: Recently there has been a surge of interest in applying Graph Neural Networks (GNN) for datasets that are inherently structured as graphs, such as social networks. Many variants of GNNs (Li et al. (2015); Hamilton et al. (2017); Kipf & Welling (2016); Velicković et al. (2017)) have been proposed, which are all based on the same principle of learning feature representations of nodes by recursively aggregating information from neighbour nodes and edges. Recent methods (Teney et al. (2017); Zellers et al. (2018)) extract graph structures from visual scenes for visual question answering. These methods build scene graphs in which nodes represent parts of the scene, and edges capture relations between these parts. Such methods are only applied to scenes of a single image. For multi-image tasks such as video classification, Wang et al. (2018b) proposed non-local neural networks, which extract dense graphs where pixels in feature maps are connected to all other feature map pixels in the space-time dimensions.
52
+
53
+ # 3 REASONING TASKS
54
+
55
+ # 3.1 DIAGRAM SYLLOGISM
56
+
57
+ Syllogism is a reasoning task where conclusion is drawn from two given assumed propositions (premises). One well-known example is 'Socrates is a man, all man will die, therefore Socrates will die'. Syllogism can be conveniently represented using many types of diagrams (Al-Fedaghi (2017)) such as Euler diagrams and Venn diagrams. Figure 1 (c) shows an example of Euler diagram syllogism. Wang et al. (2018a) developed Euler-Net, a neural net architecture that tackles Euler diagram syllogism tasks. However Euler-Net is just a simple Siamese Conv-Net, which does not guarantee scalability to more entities in diagrams. We show that the addition of multiplex graph both improves performance and scalability to more entities.
58
+
59
+ # 3.2 RAVEN PROGRESSIVE MATRICES
60
+
61
+ In this section we briefly describe Raven Progressive Matrices (RPM) in the context of the PGM dataset (Barrett et al. (2018)) and the RAVEN dataset (Zhang et al. (2019)). RPM tasks usually have 8 context diagrams and 8 answer candidates. The context diagrams are laid out in a $3 \times 3$ matrix $\mathbf{C}$ where $c_{1,1}, \ldots, c_{3,2}$ are context diagrams and $c_{3,3}$ is a blank diagram to be filled with 1 of the 8 answer candidates $\mathbf{A} = \{a_1, \ldots, a_8\}$ . One or more relations are present in rows or/and columns of the matrix. For example, in Figure 1 (a), there is $XOR$ relation of positions of objects in rows of diagrams. With the correct answer filled in, the third row and column must satisfy all relations present in the first 2 rows and columns (in the RAVEN dataset, relations are only present in rows). In addition to labels of correct candidate choice, both datasets also provide labels of meta-targets for auxiliary training. The meta-target of a task is a multi-hot vector encoding tuples of $(r, o, a)$ where $r$ is the type of a relation present, $o$ is the object type and $a$ is the attribute. For example, the meta-target for Figure 1 (a) encodes ( $XOR$ , $Shape$ , $Position$ ). The RAVEN dataset also provides additional structured labels of relations in the diagram. However, we found that structured labels do not improve results, and therefore did not use them in our implementation.
62
+
63
+ # 4 METHOD
64
+
65
+ MXGNet is comprised of three main components: an object-level representation module, a graph processing module and a reasoning module. Figure 1a shows an overview of the MXGNet architecture. The object-level representation module $F_{\rho}$ , as the name suggests, extracts representations of objects in the diagrams as nodes in a graph. For each diagram $d_{i} \subset \mathbf{C} \cup \mathbf{A}$ , a set of nodes $v_{i,j}$ ; $i = 1 \ldots L$ , $j = 1 \ldots N$ is extracted where $L$ is the number of layers and $N$ is the number of nodes per layer. We experimented with both fixed and dynamically learnt $N$ values. We also experimented with an additional 'background' encoder that encodes background lines (See Appendix C for an example containing background lines) into a single vector, which can be considered as a single node. The multiplex graph module $G_{\phi}$ , for a subset of diagrams, learns the multiplex edges capturing multiple parallel relations between nodes in a multi-layer graph where each layer corresponds to one diagram in the subset, as illustrated in Figure 1 (c). In MXGNet, we consider a subset of cardinality 3 for $3 \times 3$ diagram matrices. While prior knowledge of RPM rules allows us to naturally treat rows and columns in RPM as subsets, this prior does not generalise to other types of visual reasoning problems. Considering all possible diagram combinations as subsets is computationally expensive. To tackle this, we developed a relatively quick pre-training method to greatly reduce the search space of subsets, as described below.
66
+
67
+ Search Space Reduction: We can consider each diagram as node $v_{i}^{d}$ in a graph, where relations between adjacent diagrams are embedded as edges $e_{ij}^{d}$ . Note here we are considering the graph of 'diagrams', which is different from the graph of 'objects' in the graph processing modules. Each subset of 3 diagrams in this case can be considered as a subset of 2 edges. We here make weak assumptions that edges exist between adjacent diagrams (including vertical, horizontal and diagonal direction) and edges in the same subset must be adjacent (defined as two edges linking the same node), which are often used in other visual reasoning
68
+
69
+ problems. We denote the subset of edges as $\{e_{ij}^d,e_{jk}^d\}$ . We use 3 neural nets to embed nodes, edges and subsets. We use CNNs to embed diagram nodes into feature vectors, and MLPs to embed edges based on node embeddings and subsets based on edge embeddings. While it is possible to include graph architectures for better accuracy, we found that simple combinations of CNNs and MLPs train faster while still achieving the search space reduction results. This architecture first embed nodes, then embeds edges based on node embedding, and finally embed subsets based on edge embedding. The subset embeddings are summed and passed through a reasoning network to predict answer probability, similar to WReN (Barrett et al. (2018)). For the exact configuration of the architecture used please refer to Appendix A. For each subset $\{e_{ij}^d,e_{jk}^d\}$ , we define a gating variable $G_{ijk}$ , controlling how much does each subset contribute to the final result. In practice we use tanh function, which allows a subset to contribute both positively and negatively to the final summed embeddings. In training we put L1 regularization constraint on the gating variables to suppress $G_{ijk}$ of non-contributing subsets close to zero. This architecture can quickly discover rows and columns as contributing subsets while leaving gating variables of other subsets not activated. We describe the experiment results in section 5.1. While this method is developed for discovering reasoning rules for RPM task, it can be readily applied to any other multi-frame reasoning task for search space reduction. In the rest of the paper, we hard-gate subsets by rounding the gating variables, thereby reducing subset space to only treat rows and columns as valid subsets.
70
+
71
+ We treat the first 2 rows and columns as contextual subsets $c_{i,j}$ where $i$ and $j$ are row and column indices. For the last row and column, where the answers should be filled in, we fill in each of the 8 answer candidates, and make 8 row subsets $a_i, i \subset [1,8]$ and 8 column subsets $a_i, i \subset [1,8]$ .
72
+
73
+ The graph module then summarises the graph of objects in a subset into embeddings representing relations present in the subset. The reasoning module $R_{\theta}$ takes embeddings from context rows/columns and last rows/columns with different candidate answers filled in, and produce normalised probability of each answer being true. It also predicts meta-target for auxiliary training using context rows/columns. Next, we describe each module in detail.
74
+
75
+ # 4.1 OBJECT-LEVEL REPRESENTATION
76
+
77
+ In the PGM dataset there are two types of objects, namely 'shapes' and background 'lines'. While it is a natural choice to use object-level representation on shapes as they are varying in many attributes such as position and size, it is less efficient on background lines as they only vary in colour intensity. In this section we first describe object-level representation applied to 'shapes' objects, and then discuss object-level representation on 'lines' and an alternative background encoder which performs better.
78
+
79
+ In MXGNet we experiment with two types of object-level representations for 'shapes', namely CNN grid features and representation obtained with spatial attention. For CNN grid features, we use each spatial location in the final CNN feature map as the object feature vector. Thus for each feature maps of width $W$ and height $H$ , $N = W \times H$ object representations are extracted. This type of representation is used widely, such as in Relation Network (Santoro et al. (2017)) and VQ-VAE (van den Oord et al. (2017)). For representation obtained with attention, we use spatial attention to attend to locations of objects, and extract representations for each object attended. This is similar to objection detection models such as faster R-CNN (Ren et al. (2015)), which use a Region Proposal Network to propose bounding boxes of objects in the input image. For each attended location a presence variable $z_{pres}$ is predicted by attention module indicating whether an object exists in the location. Thus the total number of objects $N$ can vary depending on the sum of $z_{pres}$ variables. As object-level representation is not the main innovation of this paper, we leave exact details for Appendix A.1.
80
+
81
+ For background 'lines' objects, which are not varying in position and size, spatial attention is not needed. We experimented with a recurrent encoder with Long-Short Term Memory (Hochreiter & Schmidhuber (1997)) on the output feature map of CNN, outputting $M$ number of feature vectors. However, in the experiment
82
+
83
+ ![](images/981a508a3c0d58684419fe8dd8f9ca2ac1d942a1f55ee6952f4584624acf0c73.jpg)
84
+ Figure 2: Illustration of multiplex edge embeddings and cross-gating function. Each edge contains a set of different sub-connections (colored differently). Multiplex edges connecting to each node in the last layer are aggregated according to its originating layer. Aggregated embeddings are then passed to a gating function $G$ , which outputs gating variables from each aggregated embeddings.
85
+
86
+ we found that this performs less well than just feature map embeddings produced by feed-forward conv-net encoder.
87
+
88
+ # 4.2 MULTIPLEX GRAPH NETWORK
89
+
90
+ Multiplex Edge Embedding: The object-level representation module outputs a set of representations $v_{i,j}$ ; $i \subset [1, L], j \subset [1, N]$ for 'shapes' objects, where $L$ is the number of layers (cardinality of subset of diagrams) and $N$ is the number of nodes per layer. MXGNet uses an multiplex edge-embedding network $E_{\gamma}$ to generate edge embeddings encoding multiple parallel relation embeddings:
91
+
92
+ $$
93
+ e_{(i,j),(l,k)}^{t} = E_{\gamma}^{t}\left(P^{t}\left(v_{i,j}, v_{l,k}\right)\right); \quad i \neq l,\; t = 1 \dots T \tag{1}
94
+ $$
95
+
96
+ Here $P^t$ is a projection layer projecting concatenated node embeddings to $T$ different embeddings. $E^t$ is a small neural net processing $t^{th}$ projections to produce the $t^{th}$ sub-layer of edge embeddings. Here, we restricted the edges to be inter-layer only, as we found using intra-layer edges does not improve performance but increases computational costs. Figure 2 illustrates these multiplex edge embeddings between nodes of different layers. We hypothesise that different layers of the edge embeddings encode similarities/differences in different feature spaces. Such embeddings of similarities/differences are useful in comparing nodes for subsequent reasoning tasks. For example, for Progressive relation of object sizes, part of embeddings encoding size differences can be utilized to check if nodes in later layers are larger in size. This is similar to Mixture of Experts layers (Eigen et al. (2013); Shazeer et al. (2017)) introduced in Neural Machine Translation tasks. However, in this work we developed a new cross-multiplexing gating function at the node message aggregation stage, which is described below.
97
+
98
+ Graph Summarisation: After edge embeddings are generated, the graph module then summarises the graph into a feature embedding representing relations present in the subset of diagrams. We aggregate information in the graph to nodes of the last layer corresponding to the third diagram in a row or column, because in RPM tasks the relations are in the form $Diagram3 = Function(Diagram1, Diagram2)$ . All edges connecting nodes in a particular layer $v_{i,j}$ ; $i \neq L$ , to a node $v_{L,k}$ in the last layer $L$ are aggregated by a function $F_{ag}$ composed of four different types of set operations, namely max, min, sum and mean:
99
+
100
+ $$
101
+ fv_{i,k} = F_{ag}\left(e_{(i,1),(L,k)} \dots e_{(i,N),(L,k)}\right); \quad F_{ag} = \mathrm{concat}\left(\max(), \min(), \mathrm{sum}(), \mathrm{mean}()\right) \tag{2}
102
+ $$
103
+
104
+ We use multiple aggregation functions together because different sub-tasks in reasoning may require different types of summarization. For example, counting number of objects is better suited for sum while checking if there is a object with the same size is better suited for max. The aggregated node information from each layer is then combined with a cross-multiplexing gating function. It is named 'cross-multiplexing' because each embeddings in the set are 'multiplexing' other embeddings in the set with gating variables that regulate which stream of information pass through. This gating function accepts a set of summarised node embeddings $\{fv_{1,k}\ldots fv_{N,k}\}$ as input, and output gating variables for each layer of node embeddings in the set:
105
+
106
+ $$
107
+ \mathbf {g} _ {1, k} \dots \mathbf {g} _ {N, k} = G (f v _ {1, k} \dots f v _ {N, k}); \mathbf {g} _ {i, k} = \left\{g _ {i, k} ^ {1} \dots g _ {i, k} ^ {T} \right\} \tag {3}
108
+ $$
109
+
110
+ In practice $G$ is implemented as an MLP with multi-head outputs for different embeddings, and Sigmoid activation which constrains gating variable $g$ within the range of 0 to 1. The node embeddings of different layers are then multiplied with the gating variables, concatenated and passed through a small MLP to produce the final node embeddings: $fv_{k} = MLP(concat(\{fv_{i,k} \times g_{i,k} \mid i = 1 \dots N\}))$ . Node embeddings and background embeddings are then concatenated and processed by a residual neural block to produce final relation feature embeddings $r$ of the diagram subset.
111
+
112
+ # 4.3 REASONING NETWORK
113
+
114
+ The reasoning network takes relation feature embeddings $r$ from all graphs, and infers the correct answer based on these relation embeddings. We denote the relation embeddings for context rows as $r_i^{cr}$ ; $i = 1,2$ and context columns as $r_i^{cc}$ ; $i = 1,2$ . The last row and column filled with each answer candidate $a_i$ are denoted $r_i^{ar}$ ; $i = 1,\ldots ,8$ and $r_i^{ac}$ ; $i = 1,\ldots ,8$ . For the RAVEN dataset, only row relation embeddings $r^{cr}$ and $r^{ar}$ are used, as discussed in Section 3.2. The reasoning network $R_{\theta}$ is a multi-layer residual neural net with a softmax output activation that processes concatenated relation embeddings and outputs class probabilities for each answer candidate. The exact configuration of the reasoning network can be found in Appendix A.3.
115
+
116
+ For meta-target prediction, all relation information is contained in the context rows and columns of the RPM task. Therefore, we apply a meta-predicting network $R_{meta}$ with Sigmoid output activation to all context rows and columns to obtain probabilities of each meta-target categories:
117
+
118
+ $$
119
+ p _ {m e t a} = R _ {m e t a} \left(r _ {1} ^ {c r} + r _ {2} ^ {c r} + r _ {1} ^ {c c} + r _ {2} ^ {c c}\right) \tag {4}
120
+ $$
121
+
122
+ # 4.4 TRAINING
123
+
124
+ The full pipeline of MXGNet is end-to-end trainable with any gradient descent optimiser. In practice, we used RAdam optimiser (Liu et al. (2019)) for its fast convergence and robustness to learning rate differences. The loss function for the PGM dataset is the same as used in WReN (Barrett et al. (2018)): $\mathcal{L} = \mathcal{L}_{ans} + \beta \mathcal{L}_{meta - target}$ where $\beta$ balances the training between answer prediction and meta-target prediction. For the RAVEN dataset, while the loss function can include auxiliary meta-target and structured labels as $\mathcal{L} = \mathcal{L}_{ans} + \alpha \mathcal{L}_{struct} + \beta \mathcal{L}_{meta - target}$ , we found that both auxiliary targets do not improve performance, and thus set $\alpha$ and $\beta$ to 0.
125
+
126
+ # 5 EXPERIMENTS
127
+
128
+ # 5.1 SEARCH SPACE REDUCTION
129
+
130
+ The Search Space Reduction model is applied to both the PGM and RAVEN datasets to reduce the subset space. After 10 epochs, only the gating variables of the row and column subsets for PGM, and of the row subsets for RAVEN, have values larger than 0.5. The gating variables for the three rows are 0.884, 0.812 and 0.832. The gating variables for the three columns are 0.901, 0.845 and 0.854. All other gating variables are below the threshold value of 0.5. Interestingly, all activated (absolute value $>0.5$ ) gating variables are positive. This is possibly because it is easier for the neural net to learn an aggregation function than a comparator function. Exact experiment statistics can be found in Appendix D.
131
+
132
+ # 5.2 DIAGRAM SYLLOGISM PERFORMANCE
133
+
134
+ We first test how well the multiplex graph network can capture relations for the simple Diagram Syllogism task. We simply add the multiplex graph to the original Conv-Net used in Wang et al. (2018a). MXGNet achieved $99.8\%$ accuracy on both 2-contour and 3-contour tasks, higher than the original paper's $99.5\%$ and $99.4\%$ accuracies. The same performance on 2-contour and 3-contour tasks also shows that MXGNet scales better for more entities in the diagram. For more details please refer to Appendix E.
135
+
136
+ # 5.3 RPM TASK PERFORMANCES
137
+
138
+ In this section we compare all variants of MXGNet against the state-of-the-art models for the PGM and the RAVEN datasets. For the PGM dataset, we tested against results of WReN (Barrett et al. (2018)) in the auxiliary training setting with $\beta$ value of 10. In addition, we also compared MXGNet with VAE-WReN (Steenbrugge et al. (2018))'s result without auxiliary training. For the RAVEN dataset, we compared with WReN and ResNet model's performance as reported in the original paper (Zhang et al. (2019)). We evaluated MXGNet with different object-level representations (Section 4.1) on the test data in the 'neutral' split of the PGM dataset.
139
+
140
+ Table 1 (a) shows test accuracies of model variants compared with WReN and VAE-WReN for the case without auxiliary training $(\beta = 0)$ and with auxiliary training $(\beta = 10)$ for the PGM dataset. Both model variants of MXGNet outperform other models by a considerable margin, showing that the multi-layer graph is indeed a more suitable way to capture relations in the reasoning task. Model variants using grid features from the CNN feature maps slightly outperform model using spatial-attention-based object representations for both with and without auxiliary training settings. This is possibly because the increased number of parameters for the spatial attention variant leads to over-fitting, as the training losses of both model variants are very close. In our following experiments for PGM we will use model variants using CNN features to report performances.
141
+
142
+ Table 1 (b) shows test accuracies of model variants compared with WReN the best performing ResNet models for RAVEN dataset. WReN surprisingly only achieves $14.69\%$ as tested by Zhang et al. (2019). We include results of the ResNet model with or without Dynamic Residual Trees (DRT) which utilise additional structure labels of relations. We found that for the RAVEN dataset, auxiliary training of MXGNet with meta-target or structure labels does not improve performance. Therefore, we report test accuracies of models trained only with the target-prediction objective. Both variants of MXGNet significantly outperform the ResNet models. Models with spatial attention object-level representations under-perform simpler CNN features slightly, most probably due to overfitting, as the observed training losses of spatial attention models are in fact lower than CNN feature models.
143
+
144
+ # 5.4 GENERALISATION EVALUATION FOR PGM
145
+
146
+ In the PGM dataset, other than the neutral data regime in which test dataset's sampling space is the same as the training dataset, there are also other data regimes which restrict the sampling space of training or test data to evaluate the generalisation capability of a neural network. In the main paper, due to space limitations, we selected 2 representative regimes, the 'interpolation' regime and the 'extrapolation' regime to report results. For results of other data splits of PGM, please refer to Appendix G. For 'interpolation' regime, in the training dataset, when attribute $a = \text{color}$ and $a = \text{size}$ , the values of $a$ are restricted to even-indexed values in the spectrum of $a$ values. This tests how well can a model 'interpolate' for missing values. For 'Extrapolation' regime, in the training dataset, the value of $a$ is restricted to be the lower half of the value spectrum. This tests how well can a model 'extrapolate' outside of the value range in the training dataset. Table 2 shows validation and test accuracies for all three data regimes with and without auxiliary training. In addition, differences between validation and test accuracies are also presented to show how well can models generalise. MXGNet models consistently perform better than WReN for all regimes tested. Interesting for 'Interpolation' regime, while validation accuracy of MXGNet is lower than WReN, the test accuracy is higher. In addition, for regime
147
+
148
+ <table><tr><td>Model</td><td>WReN
149
+ Barrett
150
+ (2018)</td><td>et al.</td><td>VAE-WReN
151
+ Steenbrugge et al.
152
+ (2018)</td><td>ARNe
153
+ Anonymous (2020)</td><td>MXGNet
154
+ CNN</td><td>Sp-Attn</td></tr><tr><td>acc.
155
+ (%)β = 10</td><td>76.9</td><td></td><td>N/A</td><td>88.2</td><td>89.6</td><td>88.8</td></tr><tr><td>acc.
156
+ (%)β = 0</td><td>62.6</td><td></td><td>64.2</td><td>N/A</td><td>66.7</td><td>66.1</td></tr></table>
157
+
158
+ (a) PGM
159
+
160
+ <table><tr><td>Model</td><td>WReN Zhang et al. (2019)</td><td>ResNet Zhang et al. (2019)</td><td>ResNet+DRT Zhang et al. (2019)</td><td>ARNe Anonymous (2020)</td><td>MXGNet CNN Sp-Attn</td></tr><tr><td>acc. (%)</td><td>14.69</td><td>53.43</td><td>59.56</td><td>19.67</td><td>83.91 82.61</td></tr></table>
161
+
162
+ 'Interpolation' and 'Extrapolation', MXGNet also shows a smaller difference between validation and test accuracy. These results show that MXGNet has better capability of generalising outside of the training space.
163
+
164
+ (b) RAVEN
165
+ Table 1: (a) shows results comparing MXGNet model variants against WReN for the PGM dataset. (b) shows results comparing MXGNet model variants against ResNet models for the RAVEN dataset. The object-level representation has two variations which are (o1) CNN features and (o2) Spatial Attention features (Section 4.1).
166
+
167
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Regime</td><td colspan="3">β = 0</td><td colspan="3">β = 10</td></tr><tr><td>Val.(%)</td><td>test%</td><td>Diff.</td><td>Val.(%)</td><td>test%</td><td>Diff.</td></tr><tr><td rowspan="3">WReN</td><td>Neutral</td><td>63.0</td><td>62.6</td><td>-0.4</td><td>77.2</td><td>76.9</td><td>-0.3</td></tr><tr><td>Interpolation</td><td>79.0</td><td>64.4</td><td>-14.6</td><td>92.3</td><td>67.4</td><td>-24.9</td></tr><tr><td>Extrapolation</td><td>69.3</td><td>17.2</td><td>-52.1</td><td>93.6</td><td>15.5</td><td>-79.1</td></tr><tr><td rowspan="3">MXGNet</td><td>Neutral</td><td>67.1</td><td>66.7</td><td>-0.4</td><td>89.9</td><td>89.6</td><td>-0.3</td></tr><tr><td>Interpolation</td><td>74.2</td><td>65.4</td><td>-8.8</td><td>91.5</td><td>84.6</td><td>-6.9</td></tr><tr><td>Extrapolation</td><td>69.1</td><td>18.9</td><td>-50.2</td><td>94.3</td><td>18.4</td><td>-75.9</td></tr></table>
168
+
169
+ Table 2: Generalisation performance comparing MXGNet model variants against WReN. 'Diff.' is the difference between the test and the validation performances.
170
+
171
+ # 6 DISCUSSION AND CONCLUSION
172
+
173
+ We presented MXGNet, a new graph-based approach to diagrammatic reasoning problems in the style of Raven Progressive Matrices (RPM). MXGNet combines three powerful ideas, namely, object-level representation, graph neural networks and multiplex graphs, to capture relations present in the reasoning task. Through experiments we showed that MXGNet performs better than previous models on two RPM datasets. We also showed that MXGNet has better generalisation performance.
174
+
175
+ One important direction for future work is to make MXGNet interpretable, and thereby extract logic rules from MXGNet. Currently, the learnt representations in MXGNet are still entangled, providing little in the way of understanding its mechanism of reasoning. Rule extraction can provide people with better understanding of the reasoning problem, and may allow neural networks to work seamlessly with more programmable traditional logic engines.
176
+
177
+ While the multi-layer multiplex graph neural network is designed for RPM style reasoning task, it can be readily extended to other diagrammatic reasoning tasks where relations are present between multiple elements
178
+
179
+ across different diagrams. One example of a real-world application scenario is robots assembling parts of an object into a whole, such as building a LEGO model from a room of LEGO blocks. MXGNet provides a suitable way of capturing relations between parts, such as ways of piecing and locking two parts together.
180
+
181
+ # REFERENCES
182
+
183
+ Sabah Al-Fedaghi. Logic representation: Aristotelian syllogism by diagram. In Applied Computing and Information Technology/ Intl Conf on Computational Science/intelligence and Applied Informatics/ Intl Conf on Big Data, Cloud Computing, Data Science and Engineering, 2017.
184
+ Anonymous. Attention on abstract visual reasoning. In Submitted to International Conference on Learning Representations, 2020. URL https://openreview.net/forum?id=Bkel1krKPS. under review.
185
+ David Barrett, Felix Hill, Adam Santoro, Ari Morcos, and Timothy Lillicrap. Measuring abstract reasoning in neural networks. In International Conference on Machine Learning, pp. 511-520, 2018.
186
+ David Eigen, Marc'Aurelio Ranzato, and Ilya Sutskever. Learning factored representations in a deep mixture of experts. arXiv preprint arXiv:1312.4314, 2013.
187
+ Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In Advances in Neural Information Processing Systems, pp. 1024-1034, 2017.
188
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
189
+ Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8):1735-1780, 1997.
190
+ Dokhyam Hoshen and Michael Werman. IQ of neural networks. arXiv preprint arXiv:1710.01692, 2017.
191
+ Ronghang Hu, Jacob Andreas, Marcus Rohrbach, Trevor Darrell, and Kate Saenko. Learning to reason: End-to-end module networks for visual question answering. In Proceedings of the IEEE International Conference on Computer Vision, pp. 804-813, 2017.
192
+ Drew A Hudson and Christopher D Manning. Compositional attention networks for machine reasoning. arXiv preprint arXiv:1803.03067, 2018.
193
+ Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167, 2015.
194
+ Max Jaderberg, Karen Simonyan, Andrew Zisserman, et al. Spatial transformer networks. In Advances in neural information processing systems, pp. 2017-2025, 2015.
195
+ Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016.
196
+ Justin Johnson, Bharath Hariharan, Laurens van der Maaten, Li Fei-Fei, C Lawrence Zitnick, and Ross Girshick. Clevr: A diagnostic dataset for compositional language and elementary visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2901-2910, 2017.
197
+ Ta-Chu Kao and Mason A Porter. Layer communities in multiplex networks. Journal of Statistical Physics, 173(3-4):1286-1302, 2018.
198
+
199
+ Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. arXiv preprint arXiv:1609.02907, 2016.
200
+ Yujia Li, Daniel Tarlow, Marc Brockschmidt, and Richard Zemel. Gated graph sequence neural networks. arXiv preprint arXiv:1511.05493, 2015.
201
+ Liyuan Liu, Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Jiawei Han. On the variance of the adaptive learning rate and beyond. arXiv preprint arXiv:1908.03265, 2019.
202
+ Chris J Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. arXiv preprint arXiv:1611.00712, 2016.
203
+ Jacek Mandziuk and Adam Zychowski. DeepIQ: A human-inspired AI system for solving IQ test problems.
204
+ Jiayuan Mao, Chuang Gan, Pushmeet Kohli, Joshua B Tenenbaum, and Jiajun Wu. The neuro-symbolic concept learner: Interpreting scenes, words, and sentences from natural supervision. arXiv preprint arXiv:1904.12584, 2019.
205
+ John Raven. The raven's progressive matrices: change and stability over culture and time. Cognitive psychology, 41(1):1-48, 2000.
206
+ Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In Advances in neural information processing systems, pp. 91-99, 2015.
207
+ Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. In Advances in neural information processing systems, pp. 4967-4976, 2017.
208
+ Yuri Sato, Sayako Masuda, Yoshiaki Someya, Takeo Tsujii, and Shigeru Watanabe. An fmri analysis of the efficacy of euler diagrams in logical reasoning. In Visual Languages and Human-Centric Computing (VL/HCC), 2015 IEEE Symposium on, pp. 143-151. IEEE, 2015.
209
+ Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017.
210
+ Xander Steenbrugge, Sam Leroux, Tim Verbelen, and Bart Dhoedt. Improving generalization for abstract reasoning tasks using disentangled feature representations. arXiv preprint arXiv:1811.04784, 2018.
211
+ Damien Teney, Lingqiao Liu, and Anton van den Hengel. Graph-structured representations for visual question answering. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1-9, 2017.
212
+ Aaron van den Oord, Oriol Vinyals, et al. Neural discrete representation learning. In Advances in Neural Information Processing Systems, pp. 6306-6315, 2017.
213
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
214
+ Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. arXiv preprint arXiv:1710.10903, 2017.
215
+ Duo Wang, Mateja Jamnik, and Pietro Liò. Investigating diagrammatic reasoning with deep neural networks. In International Conference on Theory and Application of Diagrams, 2018a.
216
+
217
+ Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7794-7803, 2018b.
218
+ Kexin Yi, Jiajun Wu, Chuang Gan, Antonio Torralba, Pushmeet Kohli, and Josh Tenenbaum. Neural-symbolic vqa: Disentangling reasoning from vision and language understanding. In Advances in Neural Information Processing Systems, pp. 1031-1042, 2018.
219
+ Rowan Zellers, Mark Yatskar, Sam Thomson, and Yejin Choi. Neural motifs: Scene graph parsing with global context. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5831-5840, 2018.
220
+ Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. Raven: A dataset for relational and analogical visual reasoning. arXiv preprint arXiv:1903.02741, 2019.
221
+
222
+ # A ARCHITECTURE
223
+
224
+ In this section we present exact configurations of all model variants of MXGNet. Due to the complexity of architectures, we will describe each module in sequence. The object-level representation has two variations which are (o1) CNN features and (o2) Spatial Attention features. Also the models for the PGM and RAVEN datasets differ in details. Unless otherwise stated, in all layers we apply Batch Normalization Ioffe & Szegedy (2015) and use the Rectified Linear Unit as the activation function.
225
+
226
+ # A.1 OBJECT-LEVEL REPRESENTATION ARCHITECTURE
227
+
228
+ CNN features: The first approach applies a CNN on the input image and uses each spatial location in the final CNN feature map as the object feature vector. This type of representation is used widely, such as in Relation Network Santoro et al. (2017) and VQ-VAE van den Oord et al. (2017). Formally, the output of a CNN is a feature map tensor of dimension $H \times W \times D$ where $H$ , $W$ and $D$ are respectively height, width and depth of the feature map. At each $H$ and $W$ location, an object vector is extracted. This type of object representation is simple and fast, but does not guarantee that the receptive field at each feature map location fully bounds objects in the image.
229
+
230
+ We use a residual module He et al. (2016) with two residual blocks to extract CNN features, as shown in figure 4. This is because Residual connections show better performance in experiments. The structure of a single Residual Convolution Block is shown in figure 3. Unless otherwise stated, convolutional layer in residual blocks has kernel size of $3 \times 3$ . The output feature map processed by another residual block is treated as background encoding because we found that convolutional background encoding gives better results than feature vectors.
231
+
232
+ ![](images/a2c5ce0d4847ea7b46bb33da416635d22b4c956b822b83e6fce0f13406826ad0.jpg)
233
+ Figure 3: Architecture of a single Residual Convolution Block.
234
+
235
+ Spatial Attention Object-level representation: The second approach is to use spatial attention to attend to locations of objects, and extract representations for each object attended. This is similar to object detection models such as faster R-CNN Ren et al. (2015), which uses a Region Proposal Network to propose bounding boxes of objects in the input image. In practice, we use Spatial Transformer Jaderberg et al. (2015) as our spatial attention module. Figure 5 shows the architecture used for extracting object-level representation using spatial attention. A CNN composed of 1 conv layer and 2 residual blocks is first applied to the input image, and the last layer feature map is extracted. This part is the same as the CNN grid feature module. A spatial attention network composed of 2 conv layers then processes information at each spatial location on the feature map, and outputs $k$ numbers of $z = (z^{pres}, z^{where})$ , corresponding to $k$ possible objects at each location. Here, $z^{pres}$ is a binary value indicating if an object exists in this location, and $z^{where}$ is an affine
236
+
237
+ ![](images/305cd0fa32ce7e601f756cda7d0bf0914e90c2a154ce9b79f45a2552861e45b3.jpg)
238
+ Figure 4: CNN feature object-level representation module. 'Conv' is convolution layers, 'Max-Pooling' is max-pooling layer and 'ResConv Block' is Residual Convolutional Block.
239
+
240
+ transformation matrix specifying a sampling region on the feature maps. $z^{pres}$ , the binary variable, is sampled from Gumbel-Sigmoid distribution Maddison et al. (2016); Jang et al. (2016), which approximates the Bernoulli distribution. We set Gumbel temperature to 0.7 throughout the experiments. For the PGM dataset we restricted $k$ to be 1 and $z^{where}$ to be a translation and scaling matrix as 'shapes' objects do not overlap and do not have affine transformation attributes other than scaling and translation. For all $z_{i}$ ; $i \in [1, H \times W]$ , if $z_{i}^{pres}$ is 1, an object encoder network samples a patch from location specified by $z_{i}^{where}$ using a grid sampler with a fixed window size of $4 \times 4$ pixels. More details of the grid sampler can be found in Jaderberg et al. (2015). The sampled patches are then processed by a conv-layer to generate object embeddings.
241
+
242
+ ![](images/628e22ca385f431e7167a570d05fb78f217dfe80b929d297462d46c1d446f42a.jpg)
243
+ Figure 5: Spatial attention based feature object-level representation module. 'Conv' is convolution layers, 'Max-Pooling' is max-pooling layer and 'ResConv Block' is Residual Convolutional Block. $z$ is the spatial attention variable ( $z^{pres}, z^{where}$ ). Sampler is a grid sampler which samples grid of points from given feature maps.
244
+
245
+ # A.2 GRAPH NETWORKS
246
+
247
+ Multiplex Edge Embeddings: Figure 2 in the main paper shows an overview of the multiplex graph architecture. While motivation and overview of architecture is explained in section 4.2 of the main paper, in this section we provide exact configurations for each part of the model. Each sub-layer of the multiplex edge is
248
+
249
+ embedded by a small MLP. For PGM dataset, we use 6 parallel layers for each multiplex edge embeddings, with each layer having 32 hidden units and 8 output units. For RAVEN dataset we use 4 layers with 16 hidden units and 8 output units because RAVEN dataset contains fewer relations types than PGM dataset. Gating function is implemented as one Sigmoid fully connected layer with hidden size equal to the length of concatenated aggregated embeddings. Gating variables are element-wise multiplied with concatenated embeddings for gating effects. Gated embeddings are then processed with a final fully connected layer with hidden size 64.
250
+
251
+ Graph Summarization: This module summarizes all node summary embeddings and background embeddings to produce a diagram subset embedding representing relations present in the set of diagrams. We experimented with various approaches and found that keeping embeddings as feature maps and processing them with residual blocks yields the best results. Background feature map embeddings are generated with one additional residual block of 48 on top of lower layer feature-extracting resnet. For object representations obtained from CNN-grid features, we can simply reshape node embeddings into a feature map, and process it with additional conv-nets to generate a feature map embeddings of the same dimension to background feature map embeddings. For object representations with spatial attention, we can use another Spatial Transformer to write node summary embeddings to its corresponding locations on a canvas feature map. Finally we concatenate node summary embeddings and background embeddings and process it with 2 residual blocks of size 64 to produce the relation embeddings.
252
+
253
+ # A.3 REASONING NETWORK
254
+
255
+ Figure 6 shows the reasoning network configuration for RPM tasks. We experimented with the approach introduced in Barrett et al. (2018), which computes scores for each answer candidate and finally normalizes the scores. We found this approach leads to severe overfitting on the RAVEN dataset, and therefore used a simpler approach to just concatenate all relation embeddings and process them with a neural net. In practice we used two residual blocks of size 128 and 256, and a final fully connected layer with 8 units corresponding to 8 answer candidates. The output is normalized with a softmax layer. For Meta-target prediction, all context relation embeddings (context rows and columns for PGM while only rows for RAVEN dataset) are summed and fed into a fully connected prediction layer with Sigmoid activation. For PGM there are 12 different meta-targets while for RAVEN there are 9.
256
+
257
+ ![](images/35168d3ae3fe537fa2f7f63ca23d2ef8aba2614734eeecdf232a149bff2ed1de.jpg)
258
+ Figure 6: Architecture overview of reasoning module. 'RelEmbed' is relation embeddings, 'Concat' is concatenation layer. 'ResBlock' is Residual Convolutional Block. 'FC' is fully connected layer.
259
+
260
+ # B TRAINING DETAILS
261
+
262
+ The architecture is implemented in Pytorch framework. During training, we used RAdam optimizer Liu et al. (2019) with learning rate 0.0001, $\beta_{1} = 0.9, \beta_{2} = 0.999$ . We used batch size of 64, and distributed the training across 2 Nvidia Geforce Titan X GPUs. We early-stop training when validation accuracy stops increasing.
263
+
264
+ # C MORE DETAILS OF RPM DATASETS
265
+
266
+ In the PGM dataset there are two types of elements present in the diagram, namely shapes and lines. These elements have different attributes such as colour and size. In the PGM dataset, five types of relations can be present in the task: {Progression, AND, OR, XOR, ConsistentUnion}. The RAVEN dataset, compared to PGM, does not have the logic relations AND, OR, XOR, but has the additional relations Arithmetic, Constant. In addition, the RAVEN dataset only allows relations to be present in rows.
267
+
268
+ Figure 7a and 7b show two examples from the PGM dataset(Image courtesy Barrett et al. (2018)). The first example contains a 'Progression' relation of the number of objects across diagrams in columns. The second examples contain a 'XOR' relation of position of objects across diagrams in rows.
269
+
270
+ In addition to shape objects, diagrams in the PGM dataset can also contain background line objects that appear at fixed locations. Figure 8a and 8b show two examples of PGM tasks containing line objects.
271
+
272
+ # D MORE DETAILS ON SEARCH SPACE REDUCTION
273
+
274
+ In this section we provide detailed architecture used for Search Space reduction, and present additional experimental results.
275
+
276
+ The node embeddings are generated by applying a Conv-Net of 4 convolutional layers (32 filters in each layer) of kernel size 3, and a fully connected layer mapping flattened final-layer feature maps to a feature vector of size 256. Edge embeddings are generated by a 3-layer MLP of $512 - 512 - 256$ hidden units. Subset embeddings are generated by a fully connected layer of 512 units. The subset embeddings are gated with the gating variables and summed into a feature vector, which is then fed into the reasoning net, a 3-layer MLP with $256 - 256 - 13$ . The output layer contains 13 units. The first unit gives the probability of the currently combined answer choice being true. The remaining 12 units give meta-target prediction probabilities. This is the same as Barrett et al. (2018). The training loss function is:
277
+
278
+ $$
279
+ \mathcal{L} = \mathcal{L}_{ans} + \beta \mathcal{L}_{meta\text{-}target} + \lambda \left\| \sum_{(i,j,k) \in S} G_{i,j,k} \right\|_{L1} \tag{5}
280
+ $$
281
+
282
+ In our experiment we have tested various values of $\lambda$ , and found 0.01 to be the best. This model is trained with the RAdam optimizer with a learning rate of 0.0001 and batch size of 64. After 10 epochs of training, only gating variables of subsets that are rows and columns are above the 0.5 threshold. The gating variables for the three rows are 0.884, 0.812 and 0.832. The gating variables for the three columns are 0.901, 0.845 and 0.854. All other gating variables are below 0.5. Among these, the one with the highest absolute value is 0.411. Table 3 shows the top-16 ranked subsets, with each subset indexed by 2 connecting edges in the subset. Figure 9 illustrates this way of indexing the subset. For example, the first column with red inter-connecting arrows is indexed as 0-3-6. This indicates that there are two edges, one connecting diagrams 0 and 3, and the other connecting diagrams 3 and 6. Similarly the subset connected by blue arrows is indexed as 1-2-5. Note that 1-2-5 and 2-1-5 are different because 1-2-5 contains edges 1-2 and 2-5 while 2-1-5 contains edges 1-2 and 1-5.
283
+
284
+ ![](images/e37b8d19dffa7ce4d4be23f31a7bddcc7f42f09d1c5dad53d13dc4feca8f4e0a.jpg)
285
+ Figure 7: Two examples in PGM dataset. (a) task contains a 'Progression' relation of the number of objects across diagrams in columns while (b) contains a 'XOR' relation of position of objects across diagrams in rows.
286
+
287
+ # E MORE DETAILS ON EULER DIAGRAM SYLLOGISM
288
+
289
+ The original model in Wang et al. (2018a) uses a Siamese Conv-Net model to process two input premise diagrams and output all consistent conclusions. Convolutional layers with shared weights are first applied to two input diagrams. The top layer feature maps are then flattened and fed into a reasoning network to make predictions. We simply use CNN grid features of the top layer feature maps as object-level representations, and use the multi-layer multiplex graph to capture object relations between the two input premise diagrams. We use a multiplex edge embeddings of 4 layers, with each layer of dimension 32. The cross-multiplexing here becomes self-multiplexing as there are only 2 diagrams (Only 1 embedding of node summary for edges from first diagram to second diagram). Final node embeddings are processed by a convolutional layer to produce the final embedding, which is also fed into the reasoning network along with the conv-net embeddings.
290
+
291
+ ![](images/a117bf26533adc8a9bc91dc0940c09fea87e073ec8d5f1fb4d2b2eaa006cd3f2.jpg)
292
+ Figure 8: Two examples in PGM dataset containing background line objects.
293
+
294
+ ![](images/4c85ee4a7349f49978b7d962c99c3a9cda6299471f9c0837f6609f4e87320dd7.jpg)
295
+
296
+ # F ABLATION STUDY
297
+
298
+ We performed ablation study experiments to test how much the multiplex edges affect performance. We have tested two model variants, one without any graph modules, and the other modelling graphs using vanilla edge embeddings produced by MLPs, on the PGM dataset. We found that without graph modules, the model only achieved $83.2\%$ test accuracy. While this is lower than MXGNet's $89.6\%$ , it is still higher than WReN's $76.9\%$ . This is possibly because the search space reduction, by trimming away non-contributing subsets, allows the model to learn more efficiently. The graph model with vanilla edge embeddings achieves $88.3\%$ accuracy, only slightly lower than MXGNet with multiplex edge embeddings. This shows that while a general graph neural network is a suitable model for capturing relations between objects, the multiplex edge embedding does so more efficiently by allowing parallel relation multiplexing.
299
+
300
+ # G ADDITIONAL GENERALIZATION PERFORMANCE ON PGM DATASET
301
+
302
+ Table 4 shows the performance of MXGNet on other splits of the PGM dataset. MXGNet consistently outperforms WReN for test accuracy, except for H.O. Triple Pairs and H.O. shape-color in the case $\beta = 0$ . Additionally
303
+
304
+ ![](images/3a85358d1f7dd45eaf7f2e16484a1f5edaec3b26ef73a87866235cbdb0ff7af3.jpg)
305
+ Figure 9: Illustration of diagram ordering in the matrix and numbered representation of subsets.
306
+
307
+ <table><tr><td>Rank</td><td>Diagram subsets</td><td>[GatingVariable]</td></tr><tr><td>1</td><td>0-3-6</td><td>0.901</td></tr><tr><td>2</td><td>0-1-2</td><td>0.884</td></tr><tr><td>3</td><td>2-5-8</td><td>0.854</td></tr><tr><td>4</td><td>1-4-7</td><td>0.845</td></tr><tr><td>5</td><td>6-7-8</td><td>0.832</td></tr><tr><td>6</td><td>3-4-5</td><td>0.812</td></tr><tr><td>7</td><td>1-2-5</td><td>0.411</td></tr><tr><td>8</td><td>2-1-5</td><td>0.384</td></tr><tr><td>9</td><td>3-6-7</td><td>0.381</td></tr><tr><td>10</td><td>3-7-4</td><td>0.364</td></tr><tr><td>11</td><td>6-3-7</td><td>0.360</td></tr><tr><td>12</td><td>1-5-4</td><td>0.357</td></tr><tr><td>13</td><td>0-4-6</td><td>0.285</td></tr><tr><td>14</td><td>3-4-7</td><td>0.282</td></tr><tr><td>15</td><td>1-3-4</td><td>0.273</td></tr><tr><td>16</td><td>1-4-5</td><td>0.271</td></tr></table>
308
+
309
+ Table 3: All subsets ranked by the absolute value of their corresponding gating variables.
310
+
311
+ here we provide the analysis according to Sec 4.2 and Sec 4.6 in Barrett et al. (2018). Unfortunately, the analysis of Sec 4.3 of that paper, namely the analysis of distractors, cannot be performed as the publicly available dataset does not include any ground truth labels about distractors, nor any labels of present objects that can be used to synthesize distractor labels. For Meta-target prediction, MXGNet achieves $84.1\%$ accuracy. When the Meta-target is correctly predicted, the model's target prediction accuracy increases to $92.4\%$ . When the Meta-target is incorrectly predicted, the model only has $75.6\%$ accuracy. For the three logical relations the model performs best for the $OR$ relation $(95.3\%)$ , and worst for the $XOR$ relation $(92.6\%)$ . Accuracy for line-type tasks $(86.5\%)$ is only
312
+
313
+ slightly better than for shape tasks (80.1%), showing that object representation with graph modeling does improve on relations between shapes. The type of relation with worst performance is ConsistentUnion, with only 75.1% accuracy. This is expected as ConsistentUnion is in fact a memory task instead of relational reasoning task.
314
+
315
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Regime</td><td colspan="3">β = 0</td><td colspan="3">β = 10</td></tr><tr><td>Val.(%)</td><td>test%</td><td>Diff.</td><td>Val.(%)</td><td>test%</td><td>Diff.</td></tr><tr><td rowspan="5">WReN</td><td>H.O. Attribute Pairs</td><td>46.7</td><td>27.2</td><td>-19.5</td><td>73.4</td><td>51.7</td><td>-21.7</td></tr><tr><td>H.O. Triple Pairs</td><td>63.9</td><td>41.9</td><td>-22.0</td><td>74.5</td><td>56.3</td><td>-18.2</td></tr><tr><td>H.O. Triples</td><td>63.4</td><td>19.0</td><td>-44.4</td><td>80.0</td><td>20.1</td><td>-59.9</td></tr><tr><td>H.O. line-type</td><td>59.5</td><td>14.4</td><td>-45.1</td><td>78.1</td><td>16.4</td><td>-61.7</td></tr><tr><td>H.O. shape-color</td><td>69.3</td><td>17.2</td><td>-52.1</td><td>93.6</td><td>15.5</td><td>-78.1</td></tr><tr><td rowspan="5">MXGNet</td><td>H.O. Attribute Pairs</td><td>68.3</td><td>33.6</td><td>-34.7</td><td>81.9</td><td>69.3</td><td>-12.6</td></tr><tr><td>H.O. Triple Pairs</td><td>67.1</td><td>43.3</td><td>-23.8</td><td>78.1</td><td>64.2</td><td>-13.9</td></tr><tr><td>H.O. Triples</td><td>63.7</td><td>19.9</td><td>-43.8</td><td>80.5</td><td>20.2</td><td>-60.3</td></tr><tr><td>H.O. line-type</td><td>60.1</td><td>16.7</td><td>-43.4</td><td>85.2</td><td>16.8</td><td>-61.5</td></tr><tr><td>H.O. shape-color</td><td>68.5</td><td>16.6</td><td>-51.9</td><td>89.2</td><td>15.6</td><td>-73.6</td></tr></table>
316
+
317
+ Table 4: Generalisation performance comparing MXGNet model variants against WReN. 'Diff.' is the difference between the test and the validation performances.
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97ca2a48cba59f5a166a2b6977273a9da448325f95a2ffd2f90366e4384d1258
3
+ size 635039
abstractdiagrammaticreasoningwithmultiplexgraphnetworks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a21e0ed8b2f7c8d03579bfa26d6f31c951a76dfd6e7932b379c3031271b1336
3
+ size 462520
acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edf98b162f551512dc2acf2e1eb9905035a34ca7e09daa64accbfbd572e14da5
3
+ size 198653
acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:874daabd38eaf4daffc382187feda03d8bdcc6be2c05f1344c7374232ffb3297
3
+ size 226493
acceleratingsgdwithmomentumforoverparameterizedlearning/4b89d651-c255-4e17-98d9-35e139e923c2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd1a46a8fe2d4e9e3d7af58a685cf67ffbe184b818719dd5c84a4ba5cdb80c41
3
+ size 1644169
acceleratingsgdwithmomentumforoverparameterizedlearning/full.md ADDED
The diff for this file is too large to render. See raw diff
 
acceleratingsgdwithmomentumforoverparameterizedlearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f0f5a64525fe7450e4c1a2b75a969e0eadd3fa114870ed5e311e86beea784a8
3
+ size 1137048
acceleratingsgdwithmomentumforoverparameterizedlearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e569f550c851c698e068d001672d9f3868e8e79ab4cad497617cc34c2c0e464
3
+ size 1342392
acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bea3c3f7f046a18069f343a8189876bf69e9be6b73e89c851e6ce925b81d7b4
3
+ size 104864
acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03f18aaa238d4abbfe93d53132baa557db6565274898b086fbc42d5c2f049cb7
3
+ size 122267
acloserlookatdeeppolicygradients/529cce00-db0f-4816-97bb-f8e220d0a463_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4a07745fb823abed8809f6486f82b6cdb4151ad4c6c8f713873bbc03ca1b8d1
3
+ size 1711722
acloserlookatdeeppolicygradients/full.md ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A CLOSER LOOK AT DEEP POLICY GRADIENTS
2
+
3
+ Andrew Ilyas\*, Logan Engstrom\*, Shibani Santurkar $^{1}$ , Dimitris Tsipras $^{1}$ , Firdaus Janoos $^{2}$ , Larry Rudolph $^{1,2}$ , and Aleksander Madry $^{1}$
4
+
5
+ $^{1}$ MIT $^{2}$ Two Sigma {ailyas,engstrom,shibani,tsipras,madry}@mit.edu rudolph@csail.mit.edu, firdaus.janoos@twosigma.com
6
+
7
+ # ABSTRACT
8
+
9
+ We study how the behavior of deep policy gradient algorithms reflects the conceptual framework motivating their development. To this end, we propose a fine-grained analysis of state-of-the-art methods based on key elements of this framework: gradient estimation, value prediction, and optimization landscapes. Our results show that the behavior of deep policy gradient algorithms often deviates from what their motivating framework would predict: the surrogate objective does not match the true reward landscape, learned value estimators fail to fit the true value function, and gradient estimates poorly correlate with the "true" gradient. The mismatch between predicted and empirical behavior we uncover highlights our poor understanding of current methods, and indicates the need to move beyond current benchmark-centric evaluation methods.
10
+
11
+ # 1 INTRODUCTION
12
+
13
+ Deep reinforcement learning (RL) is behind some of the most publicized achievements of modern machine learning (Silver et al., 2017; OpenAI, 2018; Dayarathna et al., 2016; OpenAI et al., 2018). In fact, to many, this framework embodies the promise of the real-world impact of machine learning. However, the deep RL toolkit has not yet attained the same level of engineering stability as, for example, the current deep (supervised) learning framework. Indeed, recent studies demonstrate that state-of-the-art deep RL algorithms suffer from oversensitivity to hyperparameter choices, lack of consistency, and poor reproducibility (Henderson et al., 2017).
14
+
15
+ This state of affairs suggests that it might be necessary to re-examine the conceptual underpinnings of deep RL methodology. More precisely, the overarching question that motivates this work is:
16
+
17
+ To what degree does current practice in deep RL reflect the principles informing its development?
18
+
19
+ Our specific focus is on deep policy gradient methods, a widely used class of deep RL algorithms. Our goal is to explore the extent to which state-of-the-art implementations of these methods succeed at realizing the key primitives of the general policy gradient framework.
20
+
21
+ Our contributions. We take a broader look at policy gradient algorithms and their relation to their underlying framework. With this perspective in mind, we perform a fine-grained examination of key RL primitives as they manifest in practice. Concretely, we study:
22
+
23
+ - Gradient Estimation: we find that even when agents improve in reward, their gradient estimates used in parameter updates poorly correlate with the "true" gradient. We additionally show that gradient estimate quality decays with training progress and task complexity. Finally, we demonstrate that varying the sample regime yields training dynamics that are unexplained by the motivating framework and run contrary to supervised learning intuition.
24
+ - Value Prediction: our experiments indicate that value networks successfully solve the supervised learning task they are trained on, but do not fit the true value function. Additionally, employing a value network as a baseline function only marginally decreases the
25
+
26
+ variance of gradient estimates compared to using true value as a baseline (but still dramatically increases agent's performance compared to using no baseline at all).
27
+
28
+ - Optimization Landscapes: we show that the optimization landscape induced by modern policy gradient algorithms is often not reflective of the underlying true reward landscape, and that the latter is frequently poorly behaved in the relevant sample regime.
29
+
30
+ Overall, our results demonstrate that the motivating theoretical framework for deep RL algorithms is often unpredictive of phenomena arising in practice. This suggests that building reliable deep RL algorithms requires moving past benchmark-centric evaluations to a multi-faceted understanding of their often unintuitive behavior. We conclude (in Section 3) by discussing several areas where such understanding is most critically needed.
31
+
32
+ # 2 EXAMINING THE PRIMITIVES OF DEEP POLICY GRADIENT ALGORITHMS
33
+
34
+ In this section, we investigate the degree to which our theoretical understanding of RL applies to modern methods. We consider key primitives of policy gradient algorithms: gradient estimation, value prediction and reward fitting. In what follows, we perform a fine-grained analysis of state-of-the-art policy gradient algorithms (PPO and TRPO) through the lens of these primitives—detailed preliminaries, background, and notation can be found in Appendix A.1.
35
+
36
+ # 2.1 GRADIENT ESTIMATE QUALITY
37
+
38
+ A central premise of policy gradient methods is that stochastic gradient ascent on a suitable objective function yields a good policy. These algorithms use as a primitive the gradient of that objective function:
39
+
40
+ $$
41
+ \hat {g} = \nabla_ {\theta} \mathbb {E} _ {(s _ {t}, a _ {t}) \sim \pi_ {0}} \left[ \frac {\pi_ {\theta} (a _ {t} | s _ {t})}{\pi_ {0} (a _ {t} | s _ {t})} \widehat {A} _ {\pi_ {0}} (s _ {t}, a _ {t}) \right] = \mathbb {E} _ {(s _ {t}, a _ {t}) \sim \pi_ {0}} \left[ \frac {\nabla_ {\theta} \pi_ {\theta} (a _ {t} | s _ {t})}{\pi_ {0} (a _ {t} | s _ {t})} \widehat {A} _ {\pi_ {0}} (s _ {t}, a _ {t}) \right], \tag {1}
42
+ $$
43
+
44
+ where in the above we use standard RL notation (see Appendix A.1 for more details). An underlying assumption behind these methods is that we have access to a reasonable estimate of this quantity. This assumption effectively translates into an assumption that we can accurately estimate the expectation above using an empirical mean of finite (typically $\sim 10^3$ ) samples. Evidently (since the agent attains a high reward) these estimates are sufficient to consistently improve reward—we are thus interested in the relative quality of these gradient estimates in practice, and the effect of gradient quality on optimization.
45
+
46
+ ![](images/61d7ff0538997cdefd93eaaf2dbbdb90eb883acd07437b442d85aa6587655b44.jpg)
47
+ Figure 1: Empirical variance of the estimated gradient (c.f. (1)) as a function of the number of state-action pairs used in estimation in the MuJoCo Humanoid task. We measure the average pairwise cosine similarity between ten repeated gradient measurements taken from the same policy, with the $95\%$ confidence intervals (shaded). For each algorithm, we perform multiple trials with the same hyperparameter configurations but different random seeds, shown as repeated lines in the figure. The vertical line (at $x = 2\mathrm{K}$ ) indicates the sample regime used for gradient estimation in standard implementations of policy gradient methods. In general, it seems that obtaining tightly concentrated gradient estimates would require significantly more samples than are used in practice, particularly after the first few timesteps. For other tasks – such as Walker2d-v2 and Hopper-v2 – the plots (seen in Appendix Figure 9) have similar trends, except that gradient variance is slightly lower. Confidence intervals calculated with 500 sample bootstrapping.
48
+
49
+ ![](images/bf30045db978b443822c2a6c1c62716da52331f31c02b6388f404dbe43411c35.jpg)
50
+
51
+ ![](images/5c8c5dfbbe0bbe0a812eda2bce801e7877b65734b8b6dca8dabf47e9f9a29d02.jpg)
52
+
53
+ ![](images/3ec66efdb71546fc430b2417d598d3e931133272666ad9693946fc5a0467d7b5.jpg)
54
+
55
+ ![](images/f96d258376a4221830924883887200c8ebe950f84a93aa98d47695cf06547a25.jpg)
56
+ Figure 2: Convergence of gradient estimates (c.f. (1)) to the "true" expected gradient in the MuJoCo Humanoid task. We measure the mean cosine similarity between the "true" gradient approximated using ten million state-action pairs, and ten gradient estimates which use increasing numbers of state-action pairs (with $95\%$ confidence intervals). For each algorithm, we perform multiple trials with the same hyperparameter configurations but different random seeds. The vertical line (at $x = 2\mathrm{K}$ ) indicates the sample regime used for gradient estimation in standard implementations of policy gradient methods. Observe that although it is possible to empirically estimate the true gradient, this requires several-fold more samples than are used commonly in practical applications of these algorithms. See additionally that the estimation task becomes more difficult further into training. For other tasks – such as Walker2d-v2 and Hopper-v2 – the plots (seen in Appendix Figure 10) have similar trends, except that gradient estimation is slightly better. Confidence intervals calculated with 500 sample bootstrapping.
57
+
58
+ ![](images/bba4835501368ca383f50d0f9d8b2f33b874255391df5bac84606f9f23582504.jpg)
59
+
60
+ ![](images/5be0cceec19ade49bc284688a3eb2642c7fa95a0cd9f0d71717a57dac2cf033b.jpg)
61
+
62
+ ![](images/344745b39152530967a0f7932c853b9d095d0566a3e048a3605ecfbced4e4182.jpg)
63
+
64
+ How accurate are the gradient estimates we compute? To answer this question, we examine two of the most natural measures of estimate quality: the empirical variance and the convergence to the "true" gradient. To evaluate the former, we measure the average pairwise cosine similarity between estimates of the gradient computed from the same policy with independent rollouts (Figure 1). We evaluate the latter by first forming an estimate of the true gradient with a large number of state-action pairs. We then examine the convergence of gradient estimates to this "true" gradient (which we once again measure using cosine similarity) as we increase the number of samples (Figure 2).
65
+
66
+ We observe that deep policy gradient methods operate with relatively poor estimates of the gradient, especially as task complexity increases and as training progresses (contrast Humanoid-v2, a "hard" task, to other tasks and contrast successive checkpoints in Figures 1 and 2). This is in spite of the fact that our agents continually improve throughout training, and attain nowhere near the maximum reward possible on each task. In fact, we sometimes observe a zero or even negative correlation in the relevant sample regime<sup>1</sup>.
67
+
68
+ While these results might be reminiscent of the well-studied "noisy gradients" problem in supervised learning (Robbins & Monro, 1951; d'Aspremont, 2008; Kawaguchi, 2016; Safran & Shamir, 2018; Livni et al., 2014; Keskar et al., 2016; Hochreiter & Schmidhuber, 1997), we have very little understanding of how gradient quality affects optimization in the substantially different reinforcement learning setting. For example:
69
+
70
+ - The sample regime in which RL algorithms operate seems to have a profound impact on the robustness and stability of agent training—in particular, many of the sensitivity issues reported by Henderson et al. (2017) are claimed to disappear (Sutskever, 2018) in higher-sample regimes. The implications of working in this sample regime, and more generally the impact of sample complexity on training stability, remain to be precisely understood.
71
+ - Agent policy networks are trained concurrently with value networks (discussed more in the following section) meant to reduce the variance of gradient estimates. Under our conceptual framework, we might expect these networks to help gradient estimates more as training progresses, contrary to what we observe in Figure 1. The value network also makes the now two-player optimization landscape and training dynamics even more difficult to grasp, as such interactions are poorly understood.
72
+
73
+ - The relevant measure of sample complexity for many settings (number of state-action pairs) can differ drastically from the number of independent samples used at each training iteration (the number of complete trajectories). The latter quantity (a) tends to be much lower than the number of state-action pairs, and (b) decreases across iterations during training.
74
+
75
+ All the above factors make it unclear to what degree our intuition from classical settings transfer to the deep RL regime. And the policy gradient framework, as of now, provides little predictive power regarding the variance of gradient estimates and its impact on reward optimization.
76
+
77
+ Our results indicate that despite having a rigorous theoretical framework for RL, we lack a precise understanding of the structure of the reward landscape and optimization process.
78
+
79
+ # 2.2 VALUE PREDICTION
80
+
81
+ Our findings from the previous section motivate a deeper look into gradient estimation. After all, the policy gradient in its original formulation (Sutton et al., 1999) is known to be hard to estimate, and thus algorithms employ a variety of variance reduction methods. The most popular of these techniques is a baseline function. Concretely, an equivalent form of the policy gradient is given by:
82
+
83
+ $$
84
+ \widehat {g} _ {\theta} = \mathbb {E} _ {\tau \sim \pi_ {\theta}} \left[ \sum_ {(s _ {t}, a _ {t}) \in \tau} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \cdot \left(Q _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) - b \left(s _ {t}\right)\right) \right] \tag {2}
85
+ $$
86
+
87
+ where $b(s_{t})$ is some fixed function of the state $s_t$ . A canonical choice of baseline function is the value function $V_{\pi}(s)$ , the expected return from a given state (more details and motivation in A.1):
88
+
89
+ $$
90
+ V _ {\pi_ {\theta}} \left(s _ {t}\right) = \mathbb {E} _ {\pi_ {\theta}} \left[ R _ {t} \mid s _ {t} \right]. \tag {3}
91
+ $$
92
+
93
+ Indeed, fitting a value-estimating function (Schulman et al., 2015c; Sutton & Barto, 2018) (a neural network, in the deep RL setting) and using it as a baseline function is precisely the approach taken by most deep policy gradient methods. Concretely, one trains a value network $V_{\theta_t}^{\pi}$ such that:
94
+
95
+ $$
96
+ \theta_ {t} = \min _ {\theta} \mathbb {E} \left[ \left(V _ {\theta} ^ {\pi} \left(s _ {t}\right) - \left(V _ {\theta_ {t - 1}} ^ {\pi} \left(s _ {t}\right) + A _ {t}\right)\right) ^ {2} \right] \tag {4}
97
+ $$
98
+
99
+ where $V_{\theta_{t-1}}^{\pi}(s_t)$ are estimates given by the last value function, and $A_t$ is the advantage of the policy, i.e. the returns minus the estimated values. (Typically, $A_t$ is estimated using generalized advantage estimation, as described in (Schulman et al., 2015c).) Our findings in the previous section prompt us to take a closer look at the value network and its impact on the variance of gradient estimates.
100
+
101
+ ![](images/6b4c234991ebe700ed81ef1a6a9b3afbf03141794346680933b3faba5d4cf7d8.jpg)
102
+ (a)
103
+
104
+ ![](images/43d18f8e98130bd4a3942e7e6165b1c2925a5dd582f24b2bb5d0e861911493a4.jpg)
105
+ Figure 3: Quality of value prediction in terms of mean relative error (MRE) on heldout state-action pairs for agents trained to solve the MuJoCo Walker2d-v2 task. We observe in (left) that the agents do indeed succeed at solving the supervised learning task they are trained for—the MRE on the GAE-based value loss $(V_{old} + A_{GAE})^2$ (c.f. (4)) is small. On the other hand, in (right) we see that the returns MRE is still quite high—the learned value function is off by about $50\%$ with respect to the underlying true value function. Similar plots for other MuJoCo tasks are in Appendix A.5.
106
+
107
+ ![](images/c93caa9e58705e073fd06ca5d37fa1b1c5095a46ab046a46ad9a8efda1242eca.jpg)
108
+
109
+ ![](images/6178f5a29134e1d5ec33effb1235d2cbdf23102b4f1e55632ed9d4c8d1c5c721.jpg)
110
+
111
+ ![](images/7406a8ff1ccd516cd3ef6251d01b8fe86df5ca5cd7c3fba37881964de6a8fb1d.jpg)
112
+
113
+ ![](images/ff9c2a6d57127ae74d4d55219ce57ecd1d7bdac7d49e2422f02ac37e0a6a5f27.jpg)
114
+ Figure 4: Efficacy of the value network as a variance reducing baseline for Walker2d-v2 (top) and Hopper-v2 (bottom) agents. We measure the empirical variance of the gradient (c.f. (1)) as a function of the number of state-action pairs used in estimation, for different choices of baseline functions: the value network (used by the agent in training), the "true" value function (fit to the returns using $5 \cdot 10^{6}$ state-action pairs sampled from the current policy) and the "zero" value function (i.e. replacing advantages with returns). We observe that using the true value function leads to a significantly lower-variance estimate of the gradient compared to the value network. In turn, employing the value network yields a noticeable variance reduction compared to the zero baseline function, even though this difference may appear rather small in the small-sample regime (2K). Confidence intervals calculated with 10 sample bootstrapping.
115
+
116
+ ![](images/0c6f866472174745def469e80a3a1d260ef15f14bb7f804d62822dd6b70e9a87.jpg)
117
+ (b) Hopper-v2
118
+
119
+ ![](images/d2ed880c6c7cc9599ff2377800f6e35a1dd38ed5c86bdf8fb7773b86c02b8a01.jpg)
120
+
121
+ Value prediction as a supervised learning problem. We first analyze the value network through the lens of the supervised learning problem it solves. After all, (4) describes an empirical risk minimization, where a loss is minimized over a set of sampled $(s_t, a_t)$ . So, how does $V_{\theta}^{\pi}$ perform as a solution to (4)? And in turn, how does (4) perform as a proxy for learning the true value function?
122
+
123
+ Our results (Figure 3a) show that the value network does succeed at both fitting the given loss function and generalizing to unseen data, showing low and stable mean relative error (MRE). However, the significant drop in performance as shown in Figure 3 indicates that the supervised learning problem induced by (4) does not lead to $V_{\theta}^{\pi}$ learning the underlying true value function.
124
+
125
+ Does the value network lead to a reduction in variance? Though evaluating the $V_{\theta}^{\pi}$ baseline function as a value predictor as we did above is informative, in the end the sole purpose of the value function is to reduce variance. So: how does using our value function actually impact the variance of our gradient estimates? To answer this question, we compare the variance reduction that results from employing our value network against both a "true" value function and a trivial "zero" baseline function (i.e. simply replacing advantages with returns). Our results, captured in Figure 4, show that the "true" value function yields a much lower-variance estimate of the gradient. This is especially true in the sample regime in which we operate. We note, however, that despite not effectively predicting the true value function or inducing the same degree of variance reduction, the value network does help to some degree (compared to the "zero" baseline). Additionally, the seemingly marginal increase in gradient correlation provided by the value network (compared to the "true" baseline function) turns out to result in a significant improvement in agent performance. (Indeed, agents trained without a baseline reach almost an order of magnitude worse reward.)
126
+
127
+ Our findings suggest that we still need a better understanding of the role of the value network in agent training, and raise several questions that we discuss in Section 3.
128
+
129
+ # 2.3 EXPLORING THE OPTIMIZATION LANDSCAPE
130
+
131
+ Another key assumption of policy gradient algorithms is that first-order updates (w.r.t. policy parameters) actually yield better policies. It is thus natural to examine how valid this assumption is.
132
+
133
+ The true rewards landscape. We begin by examining the landscape of agent reward with respect to the policy parameters. Indeed, even if deep policy gradient methods do not optimize for the true reward directly (e.g. if they use a surrogate objective), the ultimate goal of any policy gradient algorithm is to navigate this landscape. First, Figure 5 shows that while estimating the true reward landscape with a high number of samples yields a relatively smooth reward landscape (perhaps suggesting viability of direct reward optimization), estimating the true reward landscape in the typical, low sample regime results in a landscape that appears jagged and poorly-behaved. The low-sample regime thus gives rise to a certain kind of barrier to direct reward optimization. Indeed, applying our algorithms in this regime makes it impossible to distinguish between good and bad points in the landscape, even though the true underlying landscape is fairly well-behaved.
134
+
135
+ The surrogate objective landscape. The untamed nature of the rewards landscape has led to the development of alternate approaches to reward maximization. Recall that an important element of many modern policy gradient methods is the maximization of a surrogate objective function in place of the true rewards (the exact mechanism behind the surrogate objective is detailed in Appendix A.1, and particularly in (14)). The surrogate objective, based on relaxing the policy improvement theorem of Kakade and Langford (Kakade & Langford, 2002), can be viewed as a simplification of the reward maximization objective.
136
+
137
+ As a purported approximation of the true returns, one would expect that the surrogate objective landscape approximates the true reward landscape fairly well. That is, parameters corresponding to good surrogate objective will also correspond to good true reward.
138
+
139
+ Figure 6 shows that in the early stages of training, the optimization landscapes of the true reward and surrogate objective are indeed approximately aligned. However, as training progresses, the surrogate objective becomes much less predictive of the true reward in the relevant sample regime. In particular, we often observe that directions that increase the surrogate objective lead to a decrease of the true reward (see Figures 6, 7). In a higher-sample regime (using several orders of magnitude more samples), we find that PPO and TRPO turn out to behave rather differently. In the case of TRPO, the update direction following the surrogate objective matches the true reward much more closely. However, for PPO we consistently observe landscapes where the step direction leads to lower true reward, even in the high-sample regime. This suggests that even when estimated accurately enough,
140
+
141
+ ![](images/cb6e7f2dc9c8fa5808c8b0ec5739c18868075eee2f7cb81bb2ba67157c9acca5.jpg)
142
+ 2,000 state-action pairs
143
+
144
+ ![](images/3dd157e9fa44ff34900bd40a7cebe5f36ac779dbe0bf77a5d886705fde9cdd06.jpg)
145
+ 20,000 state-action pairs
146
+ Figure 5: True reward landscape concentration for TRPO on Humanoid-v2. We visualize the landscape at a training iteration 150 while varying the number of trajectories used in reward estimation (each subplot), both in the direction of the step taken and a random direction. Moving one unit along the "step direction" axis corresponds to moving one full step in parameter space. In the random direction one unit corresponds to moving along a random norm 2 Gaussian vector in the parameter space. In practice, the norm of the step is typically an order of magnitude lower than the random direction. While the landscape is very noisy in the low-sample regime, large numbers of samples reveal a well-behaved underlying landscape. See Figures 20, 19 of the Appendix for additional plots.
147
+
148
+ ![](images/a7f7f623af9b040b3f3797210ec22185d04f72e575fe970d48682e3915ace240.jpg)
149
+ 100,000 state-action pairs
150
+
151
+ ![](images/9a40ab2d4a9cc6426270668e0c1d88923611cee32c69ba663d2ada02370b3c73.jpg)
152
+ Figure 6: True reward and surrogate objective landscapes for TRPO on the Humanoid-v2 MuJoCo task. We visualize the landscapes in the direction of the update step and a random direction (as in Figure 5). The surrogate objective corresponds to the actual function optimized by the algorithm at each step. We estimate true reward with $10^{6}$ state-action pairs per point. We compare the landscapes at different points in training and with varying numbers of state-action pairs used in the update step. Early in training the true and surrogate landscapes align fairly well in both sample regimes, but later become misaligned in the low-sample regime. More landscapes in Appendix Figures 13-18.
153
+
154
+ the surrogate objective might not be an accurate proxy for the true reward. (Recall from Section 2.1 that this is a sample regime where we are able to estimate the true gradient of the reward fairly well.)
155
+
156
+ # 3 TOWARDS STRONGER FOUNDATIONS FOR DEEP RL
157
+
158
+ Deep reinforcement learning (RL) algorithms have shown great practical promise, and are rooted in a well-grounded theoretical framework. However, our results indicate that this framework often fails to provide insight into the practical performance of these algorithms. This disconnect impedes our understanding of why these algorithms succeed (or fail), and is a major barrier to addressing key challenges facing deep RL such as brittleness and poor reproducibility.
159
+
160
+ To close this gap, we need to either develop methods that adhere more closely to theory, or build theory that can capture what makes existing policy gradient methods successful. In both cases, the first step is to precisely pinpoint where theory and practice diverge. To this end, we analyze and consolidate our findings from the previous section.
161
+
162
+ Gradient estimation. Our analysis in Section 2.1 shows that the quality of gradient estimates that deep policy gradient algorithms use is rather poor. Indeed, even when agents improve, such gradient estimates often poorly correlate with the true gradient (c.f. Figure 2). We also note that gradient correlation decreases as training progresses and task complexity increases. While this certainly does not preclude the estimates from conveying useful signal, the exact underpinnings of this phenomenon in deep RL still elude us. In particular, in Section 2.1 we outline a few keys ways in which the deep RL setting is quite unique and difficult to understand from an optimization perspective, both theoretically and in practice. Overall, understanding the impact of gradient estimate quality on deep RL algorithms is challenging and largely unexplored.
163
+
164
+ Value prediction. The findings presented in Section 2.2 identify two key issues. First, while the value network successfully solves the supervised learning task it is trained on, it does not accurately model the "true" value function. Second, employing the value network as a baseline does decrease the gradient variance (compared to the trivial ("zero") baseline). However, this decrease is rather marginal compared to the variance reduction offered by the "true" value function.
165
+
166
+ It is natural to wonder whether this failure in modeling the value function is inevitable. For example, how does the loss function used to train the value network impact value prediction and variance reduction? More broadly, we lack an understanding of the precise role of the value network in
167
+
168
+ ![](images/2bb407d4d8dc5ec45e8e328f80bd5efb3fa06ef0d5231013706b9209232add05.jpg)
169
+ Figure 7: True reward and surrogate objective landscapes for PPO on the Humanoid-v2 MuJoCo task. See Figure 6 for a description. We observe that early in training the true and surrogate landscapes align well. However, later increasing the surrogate objective leads to lower true reward.
170
+
171
+ training. Can we empirically quantify the relationship between variance reduction and performance? And does the value network play a broader role than just variance reduction?
172
+
173
+ Optimization landscape. We have also seen, in Section 2.3, that the optimization landscape induced by modern policy gradient algorithms, the surrogate objective, is often not reflective of the underlying true reward landscape. We thus need a deeper understanding of why current methods succeed despite these issues, and, more broadly, how to better navigate the true reward landscape.
174
+
175
+ # 4 RELATED WORK
176
+
177
+ The idea of using gradient estimates to update neural network-based RL agents dates back at least to the REINFORCE (Williams, 1992) algorithm. Later, Sutton (Sutton et al., 1999) established a unifying framework casting these algorithms as instances of the policy gradient class of algorithms.
178
+
179
+ Our work focuses on proximal policy optimization (PPO) (Schulman et al., 2017) and trust region policy optimization (TRPO) (Schulman et al., 2015a), which are two of the most prominent policy gradient algorithms used in deep RL, drawing inspiration from works on related algorithms, such as (Peters et al., 2010) and Kakade (2001).
180
+
181
+ Many recent works document the brittleness of deep RL algorithms (Henderson et al., 2018; 2017; Islam et al., 2017). (Rajeswaran et al., 2017) and (Mania et al., 2018) demonstrate that on many benchmark tasks, state-of-the-art performance can be attained by augmented randomized search approaches. McCandlish et al. (2018) investigates gradient noise in large-batch settings, and Ahmed et al. (2018) investigates the role of entropy regularization (which we do not study) on optimization.
182
+
183
+ # 5 CONCLUSION
184
+
185
+ In this work, we analyze the degree to which key primitives of deep policy gradient algorithms follow their conceptual underpinnings. Our experiments show that these primitives often do not conform to the expected behavior: gradient estimates poorly correlate with the true gradient, better gradient estimates can require lower learning rates and can induce degenerate agent behavior, value networks reduce gradient estimation variance to a significantly smaller extent than the true value, and the underlying optimization landscape can be misleading.
186
+
187
+ This demonstrates that there is a significant gap between the theory inspiring current algorithms and the actual mechanisms driving their performance. Overall, our findings suggest that developing a deep RL toolkit that is truly robust and reliable will require moving beyond the current benchmark-driven evaluation model to a more fine-grained understanding of deep RL algorithms.
188
+
189
+ # REFERENCES
190
+
191
+ Zafarali Ahmed, Nicolas Le Roux, Mohammad Norouzi, and Dale Schuurmans. Understanding the impact of entropy on policy optimization, 2018.
192
+ Alexandre d'Aspremont. Smooth optimization with approximate gradient. SIAM Journal on Optimization, 19:1171-1183, 2008.
193
+ Miyuru Dayarathna, Yonggang Wen, and Rui Fan. Data center energy consumption modeling: A survey. IEEE Communications Surveys & Tutorials, 18(1):732-794, 2016.
194
+ Peter Henderson, Riashat Islam, Philip Bachman, Joelle Pineau, Doina Precup, and David Meger. Deep reinforcement learning that matters. arXiv preprint arXiv:1709.06560, 2017.
195
+ Peter Henderson, Joshua Romoff, and Joelle Pineau. Where did my optimum go?: An empirical analysis of gradient descent optimization in policy gradient methods, 2018.
196
+ Sepp Hochreiter and Jürgen Schmidhuber. Flat minima. Neural Computation, 9:1-42, 1997.
197
+ Riashat Islam, Peter Henderson, Maziar Gomrokchi, and Doina Precup. Reproducibility of benchmarked deep reinforcement learning tasks for continuous control. In ICML Reproducibility in Machine Learning Workshop, 2017.
198
+ Sham M. Kakade. A natural policy gradient. In NIPS, 2001.
199
+ Sham M. Kakade and John Langford. Approximately optimal approximate reinforcement learning. In ICML, 2002.
200
+ Kenji Kawaguchi. Deep learning without poor local minima. In NIPS, 2016.
201
+ Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. CoRR, abs/1609.04836, 2016.
202
+ Roi Livni, Shai Shalev-Shwartz, and Ohad Shamir. On the computational efficiency of training neural networks. In NIPS, 2014.
203
+ Horia Mania, Aurelia Guy, and Benjamin Recht. Simple random search provides a competitive approach to reinforcement learning. CoRR, abs/1803.07055, 2018.
204
+ Sam McCandlish, Jared Kaplan, Dario Amodei, and OpenAI Dota Team. An empirical model of large-batch training, 2018.
205
+ OpenAI. Openai five. https://blog.openai.com/openai-five/, 2018.
206
+ OpenAI, :, Marcin Andrychowicz, Bowen Baker, Maciek Chociej, Rafal Jozefowicz, Bob McGrew, Jakub Pachocki, Arthur Petron, Matthias Plappert, Glenn Powell, Alex Ray, Jonas Schneider, Szymon Sidor, Josh Tobin, Peter Welinder, Lilian Weng, and Wojciech Zaremba. Learning dexterous in-hand manipulation, 2018.
207
+ Jan Peters, Katharina Mulling, and Yasemin Altun. Relative entropy policy search. In AAAI, 2010.
208
+ Aravind Rajeswaran, Kendall Lowrey, Emanuel Todorov, and Sham M. Kakade. Towards generalization and simplicity in continuous control. In NIPS, 2017.
209
+ Herbert Robbins and Sutton Monro. A stochastic approximation method. Ann. Math. Statist., 22(3): 400-407, 09 1951. doi: 10.1214/aoms/1177729586. URL https://doi.org/10.1214/aoms/1177729586.
210
+ Itay Safran and Ohad Shamir. Spurious local minima are common in two-layer relu neural networks. In ICML, 2018.
211
+ John Schulman, Sergey Levine, Pieter Abbeel, Michael Jordan, and Philipp Moritz. Trust region policy optimization. In International Conference on Machine Learning, pp. 1889-1897, 2015a.
212
+
213
+ John Schulman, Philipp Moritz, Sergey Levine, Michael Jordan, and Pieter Abbeel. High-dimensional continuous control using generalized advantage estimation. arXiv preprint arXiv:1506.02438, 2015b.
214
+ John Schulman, Philipp Moritz, Sergey Levine, Michael I. Jordan, and Pieter Abbeel. High-dimensional continuous control using generalized advantage estimation. CoRR, abs/1506.02438, 2015c.
215
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
216
+ David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. Nature, 550(7676):354, 2017.
217
+ Ilya Sutskever. Keynote talk. NVIDIA NTECH, 2018. URL https://www.youtube.com/watch?v=w3ues-NayAs&t=467s.
218
+ Richard S Sutton and Andrew G Barto. Reinforcement learning: An introduction. MIT press, 2018.
219
+ Richard S. Sutton, David A. McAllester, Satinder P. Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In NIPS, 1999.
220
+ Ronald J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine Learning, 8:229-256, 1992.
221
+
222
+ # A APPENDIX
223
+
224
+ # A.1 BACKGROUND
225
+
226
+ In the reinforcement learning (RL) setting, an agent interacts with a stateful environment with the goal of maximizing cumulative reward. Formally, we model the environment as a (possibly randomized) function mapping its current state $s$ and an action $a$ supplied by the agent to a new state $s'$ and a resulting reward $r$ . The choice of actions of the agent is governed by its policy $\pi$ . This policy is a function mapping environment states to a distribution over the actions to take. The objective of an RL algorithm is to find a policy $\pi$ which maximizes the expected cumulative reward, where the expectation is taken over both environment randomness and the (randomized) action choices.
227
+
228
+ Preliminaries and notation. For a given policy $\pi$ , we denote by $\pi(a|s)$ the probability that this policy assigns to taking action $a$ when the environment is in the state $s$ . We use $r(s, a)$ to denote the reward that the agent earns for playing action $a$ in response to the state $s$ . A trajectory $\tau = \{(a_t, s_t) : t \in \{1 \dots T\}\}$ is a sequence of state-action pairs that constitutes a valid transcript of interactions of the agent with the environment. (Here, $a_t$ (resp. $s_t$ ) corresponds to the action taken by the agent (resp. state of the environment) in the $t$ -th round of interaction.) We then define $\pi(\tau)$ to be the probability that the trajectory $\tau$ is executed if the agent follows policy $\pi$ (provided the initial state of the environment is $s_1$ ). Similarly, $r(\tau) = \sum_{t} r(s_t, a_t)$ denotes the cumulative reward earned by the agent when following this trajectory, where $s_t$ (resp. $a_t$ ) denote the $t$ -th state (resp. action) in the trajectory $\tau$ . In the RL setting, however, we often choose to maximize the discounted cumulative reward of a policy $R := R_1$ , where $R_t$ is defined as
229
+
230
+ $$
231
+ R _ {t} (\tau) = \sum_ {t ^ {\prime} = t} ^ {\infty} \gamma^ {(t ^ {\prime} - t)} r _ {t ^ {\prime}}.
232
+ $$
233
+
234
+ Here, $0 < \gamma < 1$ is a "discount factor". The discount factor ensures that the cumulative reward of a policy is well-defined even for an infinite time horizon, and it also incentivizes achieving reward earlier.
235
+
236
+ Policy gradient methods. A widely used class of RL algorithms that will be the focus of our analysis is the class of so-called policy gradient methods. The central idea behind these algorithms is to first parameterize the policy $\pi_{\theta}$ using a parameter vector $\theta$ . (In the deep RL context, $\pi_{\theta}$ is expressed by a neural network with weights $\theta$ .) Then, we perform stochastic gradient ascent on the cumulative reward with respect to $\theta$ . In other words, we want to apply the stochastic ascent approach to our problem:
237
+
238
+ $$
239
+ \max _ {\theta} \mathbb {E} _ {\tau \sim \pi_ {\theta}} [ r (\tau) ], \tag {5}
240
+ $$
241
+
242
+ where $\tau \sim \pi_{\theta}$ represents trajectories (rollouts) sampled from the distribution induced by the policy $\pi_{\theta}$ . This approach relies on the key observation (Sutton et al., 1999) that under mild conditions, the gradient of our objective can be written as:
243
+
244
+ $$
245
+ \nabla_ {\theta} \mathbb {E} _ {\tau \sim \pi_ {\theta}} [ r (\tau) ] = \mathbb {E} _ {\tau \sim \pi_ {\theta}} [ \nabla_ {\theta} \log (\pi_ {\theta} (\tau)) r (\tau) ], \tag {6}
246
+ $$
247
+
248
+ and the latter quantity can be estimated directly by sampling trajectories according to the policy $\pi_{\theta}$ .
249
+
250
+ When we use the discounted variant of the cumulative reward and note that the action of the policy at time $t$ cannot affect its performance at earlier times, we can express our gradient estimate as:
251
+
252
+ $$
253
+ \widehat {g _ {\theta}} = \mathbb {E} _ {\tau \sim \pi_ {\theta}} \left[ \sum_ {\left(s _ {t}, a _ {t}\right) \in \tau} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \cdot Q _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) \right], \tag {7}
254
+ $$
255
+
256
+ where $Q_{\pi_{\theta}}(s_t, a_t)$ represents the expected returns after taking action $a_t$ from state $s_t$ :
257
+
258
+ $$
259
+ Q _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) = \mathbb {E} _ {\pi_ {\theta}} \left[ R _ {t} \mid a _ {t}, s _ {t} \right]. \tag {8}
260
+ $$
261
+
262
+ Value estimation and advantage. Unfortunately, the variance of the expectation in (7) can be (and often is) very large, which makes getting an accurate estimate of this expectation quite challenging. To alleviate this issue, a number of variance reduction techniques have been developed. One of the most popular such techniques is the use of a so-called baseline function, wherein a state-dependent value is subtracted from $Q_{\pi_\theta}$ . Thus, instead of estimating (7) directly, we use:
263
+
264
+ $$
265
+ \widehat {g _ {\theta}} = \mathbb {E} _ {\tau \sim \pi_ {\theta}} \left[ \sum_ {(s _ {t}, a _ {t}) \in \tau} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \cdot \left(Q _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) - b (s _ {t})\right) \right], \tag {9}
266
+ $$
267
+
268
+ where $b(\cdot)$ is a baseline function of our choice.
269
+
270
+ A natural choice of the baseline function is the value function, i.e.
271
+
272
+ $$
273
+ V _ {\pi_ {\theta}} \left(s _ {t}\right) = \mathbb {E} _ {\pi_ {\theta}} \left[ R _ {t} \mid s _ {t} \right]. \tag {10}
274
+ $$
275
+
276
+ When we use the value function as our baseline, the resulting gradient estimation problem becomes:
277
+
278
+ $$
279
+ \widehat {g} _ {\theta} = \mathbb {E} _ {\tau \sim \pi_ {\theta}} \left[ \sum_ {\left(s _ {t}, a _ {t}\right) \in \tau} \nabla_ {\theta} \log \pi_ {\theta} \left(a _ {t} \mid s _ {t}\right) \cdot A _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) \right], \tag {11}
280
+ $$
281
+
282
+ where
283
+
284
+ $$
285
+ A _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) = Q _ {\pi_ {\theta}} \left(s _ {t}, a _ {t}\right) - V _ {\pi_ {\theta}} \left(s _ {t}\right) \tag {12}
286
+ $$
287
+
288
+ is referred to as the advantage of performing action $a_{t}$ . Different methods of estimating $V_{\pi_{\theta}}$ have been proposed, with techniques ranging from moving averages to the use of neural network predictors Schulman et al. (2015b).
289
+
290
+ Surrogate Objective. So far, our focus has been on extracting a good estimate of the gradient with respect to the policy parameters $\theta$ . However, it turns out that directly optimizing the cumulative rewards can be challenging. Thus, a modification used by modern policy gradient algorithms is to optimize a "surrogate objective" instead. We will focus on maximizing the following local approximation of the true reward Schulman et al. (2015a):
291
+
292
+ $$
293
+ \max _ {\theta} \mathbb {E} _ {\left(s _ {t}, a _ {t}\right) \sim \pi} \left[ \frac {\pi_ {\theta} \left(a _ {t} \mid s _ {t}\right)}{\pi \left(a _ {t} \mid s _ {t}\right)} A _ {\pi} \left(s _ {t}, a _ {t}\right) \right] \quad \left(= \mathbb {E} _ {\pi_ {\theta}} [ A _ {\pi} ]\right), \tag {13}
294
+ $$
295
+
296
+ or the normalized advantage variant proposed to reduce variance Schulman et al. (2017):
297
+
298
+ $$
299
+ \max _ {\theta} \mathbb {E} _ {\left(s _ {t}, a _ {t}\right) \sim \pi} \left[ \frac {\pi_ {\theta} \left(a _ {t} \mid s _ {t}\right)}{\pi \left(a _ {t} \mid s _ {t}\right)} \widehat {A} _ {\pi} \left(s _ {t}, a _ {t}\right) \right] \tag {14}
300
+ $$
301
+
302
+ where
303
+
304
+ $$
305
+ \widehat {A} _ {\pi} = \frac {A _ {\pi} - \mu \left(A _ {\pi}\right)}{\sigma \left(A _ {\pi}\right)} \tag {15}
306
+ $$
307
+
308
+ and $\pi$ is the current policy.
309
+
310
+ Trust region methods. The surrogate objective function, although easier to optimize, comes at a cost: the gradient of the surrogate objective is only predictive of the policy gradient locally (at the current policy). Thus, to ensure that the update steps we derive based on the surrogate objective are predictive, they need to be confined to a "trust region" around the current policy. The resulting trust region methods (Kakade, 2001; Schulman et al., 2015a; 2017) try to constrain the local variation of the parameters in policy-space by restricting the distributional distance between successive policies.
311
+
312
+ A popular method in this class is trust region policy optimization (TRPO) Schulman et al. (2015a), which constrains the KL divergence between successive policies on the optimization trajectory, leading to the following problem:
313
+
314
+ $$
315
+ \max _ {\theta} \mathbb {E} _ {(s _ {t}, a _ {t}) \sim \pi} \left[ \frac {\pi_ {\theta} (a _ {t} | s _ {t})}{\pi (a _ {t} | s _ {t})} \widehat {A} _ {\pi} (s _ {t}, a _ {t}) \right]
316
+ $$
317
+
318
+ $$
319
+ \text {s.t.} \quad D _ {KL} \left(\pi_ {\theta} (\cdot | s) \| \pi (\cdot | s)\right) \leq \delta , \quad \forall s. \tag {16}
320
+ $$
321
+
322
+ In practice, this objective is maximized using a second-order approximation of the KL divergence and natural gradient descent, while replacing the worst-case KL constraints over all possible states with an approximation of the mean KL based on the states observed in the current trajectory.
323
+
324
+ Proximal policy optimization. In practice, the TRPO algorithm can be computationally costly—the step direction is estimated with nonlinear conjugate gradients, which requires the computation of multiple Hessian-vector products. To address this issue, Schulman et al. (2017) propose proximal policy optimization (PPO), which utilizes a different objective and does not compute a projection. Concretely, PPO proposes replacing the KL-constrained objective (16) of TRPO by clipping the objective function directly as:
325
+
326
+ $$
327
+ \max _ {\theta} \mathbb {E} _ {\left(s _ {t}, a _ {t}\right) \sim \pi} \left[ \min \left(\operatorname {clip} \left(\rho_ {t}, 1 - \varepsilon , 1 + \varepsilon\right) \widehat {A} _ {\pi} \left(s _ {t}, a _ {t}\right), \rho_ {t} \widehat {A} _ {\pi} \left(s _ {t}, a _ {t}\right)\right) \right] \tag {17}
328
+ $$
329
+
330
+ where
331
+
332
+ $$
333
+ \rho_ {t} = \frac {\pi_ {\theta} \left(a _ {t} \mid s _ {t}\right)}{\pi \left(a _ {t} \mid s _ {t}\right)} \tag {18}
334
+ $$
335
+
336
+ In addition to being simpler, PPO is intended to be faster and more sample-efficient than TRPO (Schulman et al., 2017).
337
+
338
+ # A.2 EXPERIMENTAL SETUP
339
+
340
+ We use the following parameters for PPO and TRPO based on a hyperparameter grid search:
341
+
342
+ Table 1: Hyperparameters for PPO and TRPO algorithms.
343
+
344
+ <table><tr><td></td><td colspan="2">Humanoid-v2</td><td colspan="2">Walker2d-v2</td><td colspan="2">Hopper-v2</td></tr><tr><td></td><td>PPO</td><td>TRPO</td><td>PPO</td><td>TRPO</td><td>PPO</td><td>TRPO</td></tr><tr><td>Timesteps per iteration</td><td>2048</td><td>2048</td><td>2048</td><td>2048</td><td>2048</td><td>2048</td></tr><tr><td>Discount factor (γ)</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td><td>0.99</td></tr><tr><td>GAE discount (λ)</td><td>0.95</td><td>0.95</td><td>0.95</td><td>0.95</td><td>0.95</td><td>0.95</td></tr><tr><td>Value network LR</td><td>0.0001</td><td>0.0003</td><td>0.0003</td><td>0.0003</td><td>0.0002</td><td>0.0002</td></tr><tr><td>Value net num. epochs</td><td>10</td><td>10</td><td>10</td><td>10</td><td>10</td><td>10</td></tr><tr><td>Policy net hidden layers</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td></tr><tr><td>Value net hidden layers</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td><td>[64, 64]</td></tr><tr><td>KL constraint (δ)</td><td>N/A</td><td>0.07</td><td>N/A</td><td>0.04</td><td>N/A</td><td>0.13</td></tr><tr><td>Fisher est. fraction</td><td>N/A</td><td>0.1</td><td>N/A</td><td>0.1</td><td>N/A</td><td>0.1</td></tr><tr><td>Conjugate grad. 
steps</td><td>N/A</td><td>10</td><td>N/A</td><td>10</td><td>N/A</td><td>10</td></tr><tr><td>CG damping</td><td>N/A</td><td>0.1</td><td>N/A</td><td>0.1</td><td>N/A</td><td>0.1</td></tr><tr><td>Backtracking steps</td><td>N/A</td><td>10</td><td>N/A</td><td>10</td><td>N/A</td><td>10</td></tr><tr><td>Policy LR (Adam)</td><td>0.00025</td><td>N/A</td><td>0.0004</td><td>N/A</td><td>0.00045</td><td>N/A</td></tr><tr><td>Policy epochs</td><td>10</td><td>N/A</td><td>10</td><td>N/A</td><td>10</td><td>N/A</td></tr><tr><td>PPO Clipping ε</td><td>0.2</td><td>N/A</td><td>0.2</td><td>N/A</td><td>0.2</td><td>N/A</td></tr><tr><td>Entropy coeff.</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>Reward clipping</td><td>[-10, 10]</td><td>-</td><td>[-10, 10]</td><td>-</td><td>[-10, 10]</td><td>-</td></tr><tr><td>Reward normalization</td><td>On</td><td>Off</td><td>On</td><td>Off</td><td>On</td><td>Off</td></tr><tr><td>State clipping</td><td>[-10, 10]</td><td>-</td><td>[-10, 10]</td><td>-</td><td>[-10, 10]</td><td>-</td></tr></table>
345
+
346
+ All error bars we plot are $95\%$ confidence intervals, obtained via bootstrapped sampling.
347
+
348
+ # A.3 STANDARD REWARD PLOTS
349
+
350
+ ![](images/7d02f121a8ebe849a3e55a70cd684420e419bace798a18191ce334cd0c476a5e.jpg)
351
+ (a) Hopper-v2
352
+
353
+ ![](images/c561b640f7fa04851ac58af7451d3fbaff209bc365db07f9290bc6e02eb7e7bb.jpg)
354
+ (b) Walker2d-v2
355
+
356
+ ![](images/1f89e37172939463e45ba9a8de4b2ea47f45730755400f3499e30cffb69248a8.jpg)
357
+ (c) Humanoid-v2
358
+ Figure 8: Mean reward for the studied policy gradient algorithms on standard MuJoCo benchmark tasks. For each algorithm, we perform 24 random trials using the best performing hyperparameter configuration, with 10 of the random agents shown here.
359
+
360
+ ![](images/dae1a3179cb9c7d300c75dcb0258806996ead8aa287e4d0354039fd51436676f.jpg)
361
+ A.4 QUALITY OF GRADIENT ESTIMATION
362
+
363
+ ![](images/9f987e2265bd2317eb6e8b0f4ee145efec9550493219c643efe6c23aff42a738.jpg)
364
+
365
+ ![](images/55e9ae5a043dcd39e0fa66a596a2ebbc75e71bc5ac11e4462dc4fc45eaaa8478.jpg)
366
+
367
+ ![](images/fcef85cfec393a2a4d79279958fc0e888f63dbf1ce55904eb2f3ca01055f3139.jpg)
368
+
369
+ ![](images/3ea18f18315a2435b6c0f3e6910e3dd0af9f3d36d3349826f45557ff1b856758.jpg)
370
+ Figure 9: Empirical variance of the gradient (c.f. (1)) as a function of the number of state-action pairs used in estimation for policy gradient methods. We obtain multiple gradient estimates using a given number of state-action pairs from the policy at a particular iteration. We then measure the average pairwise cosine similarity between these repeated gradient measurements, along with the $95\%$ confidence intervals (shaded). Each of the colored lines (for a specific algorithm) represents a particular trained agent (we perform multiple trials with the same hyperparameter configurations but different random seeds). The dotted vertical black line (at 2K) indicates the sample regime used for gradient estimation in standard practical implementations of policy gradient methods.
371
+
372
+ ![](images/d85c36672c46c22fdebdbff92bf06fafb1cc61fdd7e1f3cd662d1b76f022d21c.jpg)
373
+ (a) Walker2d-v2
374
+
375
+ ![](images/231ee416b78a3b1357bd9b0f8580816effcb14629c7a3554ee8b6c450b472444.jpg)
376
+ (b) Hopper-v2
377
+
378
+ ![](images/9ea9797c46bb3b7282f175368fed60f171998ca380949d691f376b20b6199621.jpg)
379
+
380
+ ![](images/880540fe5512e6d6e8bffa0c2ce5a676b00d19913f729d836beced38e36c6f02.jpg)
381
+
382
+ ![](images/0af1887d20bea574c40579ea385e092779fdca8c1562f702ac3edabb8ab755ee.jpg)
383
+
384
+ ![](images/01fc284d1684b529ebdf595fa33d601041271cb2af40a89432a4827bdaefce02.jpg)
385
+
386
+ ![](images/7484f11c0953cbf1f8626b4871d6d5cfe9cebdb9593f74848f5c0aac983e81a2.jpg)
387
+
388
+ ![](images/bf94d0b01c048e7c6978bd3264f9282b92c1cfe81673877da08da0a9eaa20c0d.jpg)
389
+ Figure 10: Convergence of gradient estimates to the "true" expected gradient (c.f. (1)). We measure the cosine similarity between the true gradient (approximated using around 1M samples) and gradient estimates, as a function of number of state-action pairs used to obtain the later. For a particular policy and state-action pair count, we obtain multiple estimates of this cosine similarity and then report the average, along with the $95\%$ confidence intervals (shaded). Each of the colored lines (for a specific algorithm) represents a particular trained agent (we perform multiple trials with the same hyperparameter configurations but different random seeds). The dotted vertical black line (at 2K) indicates the sample regime used for gradient estimation in standard practical implementations of policy gradient methods.
390
+
391
+ ![](images/65a76f1d7b46d4d8d7764124e3586f92972ef5ab97e5d1c801934bfce57dc450.jpg)
392
+ (a) Walker2d-v2
393
+
394
+ ![](images/14ae4e7c83667910646047d2ff50582743b82f36c27475e5d32b177096c76b98.jpg)
395
+ (b) Hopper-v2
396
+
397
+ ![](images/bdea1d83ef7106a0929e9cbfe3a334d70439bafbaaf2b835d994c334767f8a16.jpg)
398
+
399
+ ![](images/e2bc29b873ff769d3cc7c48c2336fb457870738e21df3ba2528479bdcc503dc7.jpg)
400
+ A.5 VALUE PREDICTION
401
+
402
+ ![](images/62c7a309a9f767119bf6df26791f8fec5865dfa564650abc64c1566fdd0c352a.jpg)
403
+
404
+ ![](images/f2d8bc5caf50077763d980a1728cf0ad9a0916e00cb9aed8466fbc069ed15d94.jpg)
405
+ (a) Hopper-v2
406
+
407
+ ![](images/c2399e920f9d00ec473e9ea8565424c6b6b33c76b1680ecf88a5b74f79bb41c9.jpg)
408
+
409
+ ![](images/e6f160bfc53663bc4d0bfc54203426ca8562b43aed4652d7b8f5fc1a577c8566.jpg)
410
+ (b) Walker2d-v2
411
+ (c) Humanoid-v2
412
+
413
+ ![](images/e6199ac0ed8c7c0798688ae4b411e6c84032bea1e2fa9932908954b91f1f864d.jpg)
414
+ Figure 11: Quality of value prediction in terms of mean relative error (MRE) on train state-action pairs for agents trained to solve the MuJoCo tasks. We see that the agents do indeed succeed at solving the supervised learning task they are trained for – the train MRE on the GAE-based value loss $(V_{old} + A_{GAE})^2$ (c.f. (4)) is small (left column). We observe that the returns MRE is quite small as well (right column).
415
+
416
+ ![](images/4b3f0681a4b1c8e03fd2c231bf28eeb6c928199cc68550f896da296c0c0db88c.jpg)
417
+ (a) Hopper-v2
418
+
419
+ ![](images/ea48beb728ea86e2a9b555e15bc7bc125b780b9f4b66a814fb84e1666cd0413c.jpg)
420
+
421
+ ![](images/121093fbaa98a14ae9bc6ebddedb748c2471f88676f73d54f960cc596af9874b.jpg)
422
+ (b) Humanoid-v2
423
+
424
+ ![](images/85984bbe8fcdc7cb628c77080982a49988e658521c3df0f5c2c08caa9fb39d83.jpg)
425
+ Figure 12: Quality of value prediction in terms of mean relative error (MRE) on heldout state-action pairs for agents trained to solve MuJoCo tasks. We see that the agents do indeed succeed at solving the supervised learning task they are trained for – the validation MRE on the GAE-based value loss $(V_{old} + A_{GAE})^2$ (c.f. (4)) is small (left column). On the other hand, we see that the returns MRE is still quite high – the learned value function is off by about $50\%$ with respect to the underlying true value function (right column).
426
+
427
+ # A.6 OPTIMIZATION LANDSCAPE
428
+
429
+ ![](images/99e0df03f49000cd08918860f25bebad791586882193221e32b6345bf2831e09.jpg)
430
+ Figure 13: Humanoid-v2 - PPO reward landscapes.
431
+
432
+ ![](images/9c404f0de41451290844c749d63fdfb0df377e53e4263bef71d5f06622c09587.jpg)
433
+ Figure 14: Humanoid-v2 - TRPO reward landscapes.
434
+
435
+ ![](images/4bff42f4684f862f6195c32ee85334599f20cc33d7785e4c1d76aa26cb03bf34.jpg)
436
+ Figure 15: Walker2d-v2 - PPO reward landscapes.
437
+
438
+ ![](images/cbc38389c63d7243a63b7dcaf2f1d7868a6967c5e1e6d1aa074dc1c33843c028.jpg)
439
+ Figure 16: Walker2d-v2 - TRPO reward landscapes.
440
+
441
+ ![](images/f0848542a5290df0bd17cfa324dec83df726255271b48a07b6e6a207f153f9de.jpg)
442
+
443
+ ![](images/beb58feaf2e245cdcecc92375abab47f6fbdde594c68003fc5ae55888d0a34b4.jpg)
444
+ Figure 17: Hopper-v2 - PPO reward landscapes.
445
+
446
+ ![](images/01529c5cd95d576243ee49a59e5c630369ff41b12fbfdaa12e5f581a17ebb327.jpg)
447
+ Figure 18: Hopper-v2 - TRPO reward landscapes.
448
+
449
+ ![](images/6495a64736f1c049337bbab7ce32120ca7c8ebc21dda599b67b5c14db8533a4e.jpg)
450
+ Figure 19: Humanoid-v2 TRPO landscape concentration (see Figure 5 for a description).
451
+
452
+ ![](images/6c986388d6f0f43f78e119ff170645f0403626b78311c1f23c233d94c3d34c21.jpg)
453
+
454
+ ![](images/89f6f54aede32e9bffaaa5fc15cceea2ead652dee332edcb5e4c4fd9c359c3ff.jpg)
455
+
456
+ ![](images/bdc9f0ceb2a68d7606d56678f7ffc9a14cadf147b7c19ba81a5c089a9c248ec0.jpg)
457
+
458
+ ![](images/ad5c531f5f1089c97ee8924a0cff5a68c1d3352f806bf00de9c3279ee43fc4e8.jpg)
459
+
460
+ ![](images/0d961f996c7b4d437089d7e140a43dbe9e5df7d33ca47c15e7d629ff140bfd1f.jpg)
461
+
462
+ ![](images/abba94964c918056386cfd7eb99d05010257f4912ef431154349c9d4f1f666f3.jpg)
463
+
464
+ ![](images/63dfcfb8ba9a7b0f90a3033f3a0dc94018d9faa11d07ae767dfd8f70484182a9.jpg)
465
+
466
+ ![](images/a64a7a71613c837854ce8e6249da5216b4e1d10924e9be46ae294a0d600e5579.jpg)
467
+
468
+ ![](images/62fad68595569444bf07e4397425c1e9c0d16e5e49fa6ae05bdc43e824baba3e.jpg)
469
+
470
+ ![](images/49624d58596fe056904d803120962ef4ac57eb00d57dc0a5c62c78451da9a2fd.jpg)
471
+ Figure 20: Humanoid-v2 PPO landscape concentration (see Figure 5 for a description).
472
+
473
+ ![](images/70a3220cb7ad86c33d7d60e7456b69ae870dd9e8ae2229daf79587aa131bf803.jpg)
474
+
475
+ ![](images/695f19b49438458a433e3bc2839c21db9ff521a4ad90f5f0726ff561201b6cd0.jpg)
acloserlookatdeeppolicygradients/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5168ffc3c44d01697b640e604dffe1167cf63c56bec978a725e97232f6730f4
3
+ size 2501676
acloserlookatdeeppolicygradients/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a44c3997ace25174bdca701d6ad5907b8ed2c8d2e21b9b8ce920931d17d6f33a
3
+ size 546983
acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:084be601e32a7c5aa56e72452a7a39bfe9649e9e1f56699ff19d074aacd5fde8
3
+ size 134010
acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0e5d32415c62c72c54850eb8aff1af84e81a2f21ae6e0e65cb36fc1dce4f787
3
+ size 155513
acloserlookattheapproximationcapabilitiesofneuralnetworks/416e4db6-3318-4af2-9f5e-4dc105023431_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad565120a0912ba41499c743b267712769627594acafb2bd41c19c16c36ceeca
3
+ size 409923
acloserlookattheapproximationcapabilitiesofneuralnetworks/full.md ADDED
@@ -0,0 +1,523 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A CLOSER LOOK AT THE APPROXIMATION CAPABILITIES OF NEURAL NETWORKS
2
+
3
+ Kai Fong Ernest Chong
4
+
5
+ Information Systems Technology and Design (ISTD) pillar, Singapore University of Technology and Design, Singapore ernest_chong@sutd.edu.sg
6
+
7
+ # ABSTRACT
8
+
9
+ The universal approximation theorem, in one of its most general versions, says that if we consider only continuous activation functions $\sigma$ , then a standard feedforward neural network with one hidden layer is able to approximate any continuous multivariate function $f$ to any given approximation threshold $\varepsilon$ , if and only if $\sigma$ is non-polynomial. In this paper, we give a direct algebraic proof of the theorem. Furthermore we shall explicitly quantify the number of hidden units required for approximation. Specifically, if $X \subseteq \mathbb{R}^n$ is compact, then a neural network with $n$ input units, $m$ output units, and a single hidden layer with $\binom{n+d}{d}$ hidden units (independent of $m$ and $\varepsilon$ ), can uniformly approximate any polynomial function $f: X \to \mathbb{R}^m$ whose total degree is at most $d$ for each of its $m$ coordinate functions. In the general case that $f$ is any continuous function, we show there exists some $N \in \mathcal{O}(\varepsilon^{-n})$ (independent of $m$ ), such that $N$ hidden units would suffice to approximate $f$ . We also show that this uniform approximation property (UAP) still holds even under seemingly strong conditions imposed on the weights. We highlight several consequences: (i) For any $\delta > 0$ , the UAP still holds if we restrict all non-bias weights $w$ in the last layer to satisfy $|w| < \delta$ . (ii) There exists some $\lambda > 0$ (depending only on $f$ and $\sigma$ ), such that the UAP still holds if we restrict all non-bias weights $w$ in the first layer to satisfy $|w| > \lambda$ . (iii) If the non-bias weights in the first layer are fixed and randomly chosen from a suitable range, then the UAP holds with probability 1.
10
+
11
+ # 1 INTRODUCTION AND OVERVIEW
12
+
13
+ A standard (feedforward) neural network with $n$ input units, $m$ output units, and with one or more hidden layers, refers to a computational model $\mathcal{N}$ that can compute a certain class of functions $\rho : \mathbb{R}^n \to \mathbb{R}^m$ , where $\rho = \rho_W$ is parametrized by $W$ (called the weights of $\mathcal{N}$ ). Implicitly, the definition of $\rho$ depends on a choice of some fixed function $\sigma : \mathbb{R} \to \mathbb{R}$ , called the activation function of $\mathcal{N}$ . Typically, $\sigma$ is assumed to be continuous, and historically, the earliest commonly used activation functions were sigmoidal.
14
+
15
+ A key fundamental result justifying the use of sigmoidal activation functions was due to Cybenko (1989), Hornik et al. (1989), and Funahashi (1989), who independently proved the first version of what is now famously called the universal approximation theorem. This first version says that if $\sigma$ is sigmoidal, then a standard neural network with one hidden layer would be able to uniformly approximate any continuous function $f: X \to \mathbb{R}^m$ whose domain $X \subseteq \mathbb{R}^n$ is compact. Hornik (1991) extended the theorem to the case when $\sigma$ is any continuous bounded non-constant activation function. Subsequently, Leshno et al. (1993) proved that for the class of continuous activation functions, a standard neural network with one hidden layer is able to uniformly approximate any continuous function $f: X \to \mathbb{R}^m$ on any compact $X \subseteq \mathbb{R}^n$ , if and only if $\sigma$ is non-polynomial.
16
+
17
+ Although a single hidden layer is sufficient for the uniform approximation property (UAP) to hold, the number of hidden units required could be arbitrarily large. Given a subclass $\mathcal{F}$ of real-valued continuous functions on a compact set $X\subseteq \mathbb{R}^n$ , a fixed activation function $\sigma$ , and some $\varepsilon >0$ let $N = N(\mathcal{F},\sigma ,\varepsilon)$ be the minimum number of hidden units required for a single-hidden-layer neural network to be able to uniformly approximate every $f\in \mathcal{F}$ within an approximation error
18
+
19
+ threshold of $\varepsilon$ . If $\sigma$ is the rectified linear unit (ReLU) $x \mapsto \max(0, x)$ , then $N$ is at least $\Omega\left(\frac{1}{\sqrt{\varepsilon}}\right)$ when $\mathcal{F}$ is the class of $C^2$ non-linear functions (Yarotsky, 2017), or the class of strongly convex differentiable functions (Liang & Srikant, 2016); see also (Arora et al., 2018). If $\sigma$ is any smooth non-polynomial function, then $N$ is at most $\mathcal{O}(\varepsilon^{-n})$ for the class of $C^1$ functions with bounded Sobolev norm (Mhaskar, 1996); cf. (Pinkus, 1999, Thm. 6.8), (Maiorov & Pinkus, 1999). As a key highlight of this paper, we show that if $\sigma$ is an arbitrary continuous non-polynomial function, then $N$ is at most $\mathcal{O}(\varepsilon^{-n})$ for the entire class of continuous functions. In fact, we give an explicit upper bound for $N$ in terms of $\varepsilon$ and the modulus of continuity of $f$ , so better bounds could be obtained for certain subclasses $\mathcal{F}$ , which we discuss further in Section 4. Furthermore, even for the wider class $\mathcal{F}$ of all continuous functions $f: X \to \mathbb{R}^{m}$ , the bound is still $\mathcal{O}(\varepsilon^{-n})$ , independent of $m$ .
20
+
21
+ To prove this bound, we shall give a direct algebraic proof of the universal approximation theorem, in its general version as stated by Leshno et al. (1993) (i.e. $\sigma$ is continuous and non-polynomial). An important advantage of our algebraic approach is that we are able to glean additional information on sufficient conditions that would imply the UAP. Another key highlight we have is that if $\mathcal{F}$ is the subclass of polynomial functions $f: X \to \mathbb{R}^m$ with total degree at most $d$ for each coordinate function, then $\binom{n+d}{d}$ hidden units would suffice. In particular, notice that our bound $N \leq \binom{n+d}{d}$ does not depend on the approximation error threshold $\varepsilon$ or the output dimension $m$ .
22
+
23
+ We shall also show that the UAP holds even under strong conditions on the weights. Given any $\delta > 0$ , we can always choose the non-bias weights in the last layer to have small magnitudes no larger than $\delta$ . Furthermore, we show that there exists some $\lambda > 0$ (depending only on $\sigma$ and the function $f$ to be approximated), such that the non-bias weights in the first layer can always be chosen to have magnitudes greater than $\lambda$ . Even with these seemingly strong restrictions on the weights, we show that the UAP still holds. Thus, our main results can be collectively interpreted as a quantitative refinement of the universal approximation theorem, with extensions to restricted weight values.
24
+
25
+ Outline: Section 2 covers the preliminaries, including relevant details on arguments involving dense sets. Section 3 gives precise statements of our results, while Section 4 discusses the consequences of our results. Section 5 introduces our algebraic approach and includes most details of the proofs of our results; details omitted from Section 5 can be found in the appendix. Finally, Section 6 concludes our paper with further remarks.
26
+
27
+ # 2 PRELIMINARIES
28
+
29
+ # 2.1 NOTATION AND DEFINITIONS
30
+
31
+ Let $\mathbb{N}$ be the set of non-negative integers, let $\mathbf{0}_n$ be the zero vector in $\mathbb{R}^n$ , and let $\operatorname{Mat}(k,\ell)$ be the vector space of all $k$ -by- $\ell$ matrices with real entries. For any function $f:\mathbb{R}^n\to \mathbb{R}^m$ , let $f^{[t]}$ denote the $t$ -th coordinate function of $f$ (for each $1\leq t\leq m$ ). Given $\alpha = (\alpha_{1},\ldots ,\alpha_{n})\in \mathbb{N}^{n}$ and any $n$ -tuple $x = (x_{1},\dots,x_{n})$ , we write $x^{\alpha}$ to mean $x_{1}^{\alpha_{1}}\cdots x_{n}^{\alpha_{n}}$ . If $x\in \mathbb{R}^n$ , then $x^{\alpha}$ is a real number, while if $x$ is a sequence of variables, then $x^{\alpha}$ is a monomial, i.e., an $n$ -variate polynomial with a single term. Let $\mathcal{W}_N^{n,m}:= \{W\in \mathrm{Mat}(n + 1,N)\times \mathrm{Mat}(N + 1,m)\}$ for each $N\geq 1$ , and define $\mathcal{W}^{n,m} = \bigcup_{N\geq 1}\mathcal{W}_N^{n,m}$ . If the context is clear, we suppress the superscripts $n,m$ in $\mathcal{W}_N^{n,m}$ and $\mathcal{W}^{n,m}$ .
32
+
33
+ Given any $X \subseteq \mathbb{R}^n$ , let $\mathcal{C}(X)$ be the vector space of all continuous functions $f: X \to \mathbb{R}$ . We use the convention that every $f \in \mathcal{C}(X)$ is a function $f(x_1, \ldots, x_n)$ in terms of the variables $x_1, \ldots, x_n$ , unless $n = 1$ , in which case $f$ is in terms of a single variable $x$ (or $y$ ). We say $f$ is non-zero if $f$ is not identically the zero function on $X$ . Let $\mathcal{P}(X)$ be the subspace of all polynomial functions in $\mathcal{C}(X)$ . For each $d \in \mathbb{N}$ , let $\mathcal{P}_{\leq d}(X)$ (resp. $\mathcal{P}_d(X)$ ) be the subspace consisting of all polynomial functions of total degree $\leq d$ (resp. exactly $d$ ). More generally, let $\mathcal{C}(X, \mathbb{R}^m)$ be the vector space of all continuous functions $f: X \to \mathbb{R}^m$ , and define $\mathcal{P}(X, \mathbb{R}^m)$ , $\mathcal{P}_{\leq d}(X, \mathbb{R}^m)$ , $\mathcal{P}_d(X, \mathbb{R}^m)$ analogously.
34
+
35
+ Throughout, we assume that $\sigma \in \mathcal{C}(\mathbb{R})$ . For every $W = (W^{(1)}, W^{(2)}) \in \mathcal{W}$ , let $\mathbf{w}_j^{(k)}$ be the $j$ -th column vector of $W^{(k)}$ , and let $w_{i,j}^{(k)}$ be the $(i,j)$ -th entry of $W^{(k)}$ (for $k = 1, 2$ ). The index $i$ begins at $i = 0$ , while the indices $j, k$ begin at $j = 1, k = 1$ respectively. For convenience, let $\widehat{\mathbf{w}}_j^{(k)}$ denote the truncation of $\mathbf{w}_j^{(k)}$ obtained by removing the first entry $w_{0,j}^{(k)}$ . Define the function $\rho_W^\sigma : \mathbb{R}^n \to \mathbb{R}^m$ so that for each $1 \leq j \leq m$ , the $j$ -th coordinate function $\rho_W^{\sigma [j]}$ is given by the map
36
+
37
+ $$
38
+ x \mapsto w _ {0, j} ^ {(2)} + \sum_ {i = 1} ^ {N} w _ {i, j} ^ {(2)} \sigma (\mathbf {w} _ {i} ^ {(1)} \cdot (1, x)),
39
+ $$
40
+
41
+ where " $\cdot$ " denotes dot product, and $(1,x)$ denotes a column vector in $\mathbb{R}^{n+1}$ formed by concatenating 1 before $x$ . The class of functions that neural networks $\mathcal{N}$ with one hidden layer can compute is precisely $\{\rho_W^\sigma : W \in \mathcal{W}\}$ , where $\sigma$ is called the activation function of $\mathcal{N}$ (or of $\rho_W^\sigma$ ). Functions $\rho_W^\sigma$ satisfying $W \in \mathcal{W}_N$ correspond to neural networks with $N$ hidden units (in its single hidden layer). Every $w_{i,j}^{(k)}$ is called a weight in the $k$ -th layer, where $w_{i,j}^{(k)}$ is called a bias weight (resp. non-bias weight) if $i = 0$ (resp. $i \neq 0$ ).
42
+
43
+ Notice that we do not apply the activation function $\sigma$ to the output layer. This is consistent with previous approximation results for neural networks. The reason is simple: $\sigma \circ \rho_W^{\sigma [j]}$ (restricted to domain $X\subseteq \mathbb{R}^n$ ) cannot possibly approximate $f:X\to \mathbb{R}$ if there exists some $x_0\in X$ such that $\sigma (X)$ is bounded away from $f(x_0)$ . If instead $f(X)$ is contained in the closure of $\sigma (X)$ , then applying $\sigma$ to $\rho_W^{\sigma [j]}$ has essentially the same effect as allowing for bias weights $w_{0,j}^{(2)}$ .
44
+
45
+ Although some authors, e.g. (Leshno et al., 1993), do not explicitly include bias weights in the output layer, the reader should check that if $\sigma$ is not identically zero, say $\sigma(y_0) \neq 0$ , then having a bias weight $w_{0,j}^{(2)} = c$ is equivalent to setting $w_{0,j}^{(2)} = 0$ (i.e. no bias weight in the output layer) and introducing an $(N + 1)$ -th hidden unit, with corresponding weights $w_{0,N+1}^{(1)} = y_0$ , $w_{i,N+1}^{(1)} = 0$ for all $1 \leq i \leq n$ , and $w_{N+1,j}^{(2)} = \frac{c}{\sigma(y_0)}$ ; this means our results also apply to neural networks without bias weights in the output layer (but with one additional hidden unit).
46
+
47
+ # 2.2 ARGUMENTS INVOLVING DENSE SUBSETS
48
+
49
+ A key theme in this paper is the use of dense subsets of metric spaces. We shall consider several notions of "dense". First, recall that a metric on a set $S$ is any function $\mathfrak{d}: S \times S \to \mathbb{R}$ such that for all $x, y, z \in S$ , the following conditions hold:
50
+
51
+ (i) $\mathfrak{d}(x,y)\geq 0$ , with equality holding if and only if $x = y$
52
+ (ii) $\mathfrak{d}(x,y) = \mathfrak{d}(y,x)$
53
+ (iii) $\mathfrak{d}(x,z)\leq \mathfrak{d}(x,y) + \mathfrak{d}(y,z).$
54
+
55
+ The set $S$ , together with a metric on $S$ , is called a metric space. For example, the usual Euclidean norm for vectors in $\mathbb{R}^n$ gives the Euclidean metric $(u,v) \mapsto \| u - v \|_2$ , hence $\mathbb{R}^n$ is a metric space. In particular, every pair in $\mathcal{W}_N$ can be identified with a vector in $\mathbb{R}^{(n + 1)N + (N + 1)m}$ , so $\mathcal{W}_N$ , together with the Euclidean metric, is a metric space.
56
+
57
+ Given a metric space $X$ (with metric $\mathfrak{d}$ ), and some subset $U \subseteq X$ , we say that $U$ is dense in $X$ (w.r.t. $\mathfrak{d}$ ) if for all $\varepsilon > 0$ and all $x \in X$ , there exists some $u \in U$ such that $\mathfrak{d}(x, u) < \varepsilon$ . Arbitrary unions of dense subsets are dense. If $U \subseteq U' \subseteq X$ and $U$ is dense in $X$ , then $U'$ must also be dense in $X$ .
58
+
59
+ A basic result in algebraic geometry says that if $p \in \mathcal{P}(\mathbb{R}^n)$ is non-zero, then $\{x \in \mathbb{R}^n : p(x) \neq 0\}$ is a dense subset of $\mathbb{R}^n$ (w.r.t. the Euclidean metric). This subset is in fact an open set in the Zariski topology, hence any finite intersection of such Zariski-dense open sets is dense; see (Eisenbud, 1995). More generally, the following is true: Let $p_1, \ldots, p_k \in \mathcal{P}(\mathbb{R}^n)$ , and suppose that $X := \{x \in \mathbb{R}^n : p_i(x) = 0 \text{ for all } 1 \leq i \leq k\}$ . If $p \in \mathcal{P}(X)$ is non-zero, then $\{x \in X : p(x) \neq 0\}$ is a dense subset of $X$ (w.r.t. the Euclidean metric). In subsequent sections, we shall frequently use these facts.
60
+
61
+ Let $X \subseteq \mathbb{R}^n$ be a compact set. (Recall that $X$ is compact if it is bounded and contains all of its limit points.) For any real-valued function $f$ whose domain contains $X$ , the uniform norm of $f$ on $X$ is $\| f \|_{\infty, X} \coloneqq \sup \{|f(x)| : x \in X\}$ . More generally, if $f: X \to \mathbb{R}^m$ , then we define $\| f \|_{\infty, X} \coloneqq \max \{\| f^{[j]} \|_{\infty, X} : 1 \leq j \leq m\}$ . The uniform norm of functions on $X$ gives the uniform metric $(f, g) \mapsto \| f - g \|_{\infty, X}$ , hence $\mathcal{C}(X)$ is a metric space.
62
+
63
+ # 2.3 BACKGROUND ON APPROXIMATION THEORY
64
+
65
+ Theorem 2.1 (Stone-Weierstrass theorem). Let $X \subseteq \mathbb{R}^n$ be compact. For any $f \in \mathcal{C}(X)$ , there exists a sequence $\{p_k\}_{k \in \mathbb{N}}$ of polynomial functions in $\mathcal{P}(X)$ such that $\lim_{k \to \infty} \|f - p_k\|_{\infty, X} = 0$ .
66
+
67
+ Let $X \subseteq \mathbb{R}$ be compact. For all $d \in \mathbb{N}$ and $f \in \mathcal{C}(X)$ , define
68
+
69
+ $$
70
+ E _ {d} (f) := \inf \left\{\| f - p \| _ {\infty , X}: p \in \mathcal {P} _ {\leq d} (X) \right\}. \tag {1}
71
+ $$
72
+
73
+ A central result in approximation theory, due to Chebyshev, says that for fixed $d$ , $f$ , the infimum in (1) is attained by some unique $p^* \in \mathcal{P}_{\leq d}(\mathbb{R})$ ; see (Rivlin, 1981, Chap. 1). (Notice here that we define $p^*$ to have domain $\mathbb{R}$ .) This unique polynomial $p^*$ is called the best polynomial approximant to $f$ of degree $d$ .
74
+
75
+ Given a metric space $X$ with metric $\mathfrak{d}$ , and any uniformly continuous function $f: X \to \mathbb{R}$ , the modulus of continuity of $f$ is a function $\omega_f: [0, \infty] \to [0, \infty]$ defined by
76
+
77
+ $$
78
+ \omega_ {f} (\delta) := \sup \{| f (x) - f (y) |: x, y \in X, \mathfrak {d} (x, y) \leq \delta \}.
79
+ $$
80
+
81
+ By the Heine-Cantor theorem, any continuous $f$ with a compact domain is uniformly continuous.
82
+
83
+ Theorem 2.2 (Jackson's theorem; see (Rivlin, 1981, Cor. 1.4.1)). Let $d \geq 1$ be an integer, and let $Y \subseteq \mathbb{R}$ be a closed interval of length $r \geq 0$ . Suppose $f \in \mathcal{C}(Y)$ , and let $p^*$ be the best polynomial approximant to $f$ of degree $d$ . Then $\| f - p^* \|_{\infty, Y} = E_d(f) \leq 6\omega_f\left(\frac{r}{2d}\right)$ .
84
+
85
+ # 3 MAIN RESULTS
86
+
87
+ Throughout this section, let $X \subseteq \mathbb{R}^n$ be a compact set.
88
+
89
+ Theorem 3.1. Let $d \geq 2$ be an integer, and let $f \in \mathcal{P}_{\leq d}(X, \mathbb{R}^m)$ (i.e. each coordinate function $f^{[t]}$ has total degree $\leq d$ ). If $\sigma \in \mathcal{C}(\mathbb{R}) \backslash \mathcal{P}_{\leq d-1}(\mathbb{R})$ , then for every $\varepsilon > 0$ , there exists some $W \in \mathcal{W}_{\binom{n+d}{d}}$ such that $\| f - \rho_W^\sigma \|_{\infty, X} < \varepsilon$ . Furthermore, the following holds:
90
+
91
+ (i) Given any $\lambda > 0$ , we can choose this $W$ to satisfy the condition that $|w_{i,j}^{(2)}| < \lambda$ for all non-bias weights $w_{i,j}^{(2)}$ (i.e. $i \neq 0$ ) in the second layer.
92
+ (ii) There exists some $\lambda' > 0$ , depending only on $f$ and $\sigma$ , such that we could choose the weights of $W$ in the first layer to satisfy the condition that $\|\widehat{\mathbf{w}}_j^{(1)}\|_2 > \lambda'$ for all $j$ .
93
+
94
+ Theorem 3.2. Let $f \in \mathcal{C}(X, \mathbb{R}^m)$ , and suppose $\sigma \in \mathcal{C}(\mathbb{R}) \backslash \mathcal{P}(\mathbb{R})$ . Then for every $\varepsilon > 0$ , there exists an integer $N \in \mathcal{O}(\varepsilon^{-n})$ (independent of $m$ ), and some $W \in \mathcal{W}_N$ , such that $\| f - \rho_W^\sigma \|_{\infty, X} < \varepsilon$ . In particular, if we let $D := \sup \{\| x - y \|_2 : x, y \in X\}$ be the diameter of $X$ , then we can set $N = \binom{n + d_{\varepsilon}}{d_{\varepsilon}}$ , where $d_{\varepsilon} := \min \{d \in \mathbb{Z} : d \geq 2, \omega_{f^{[t]}}\left(\frac{D}{2d}\right) < \frac{\varepsilon}{6} \text{ for all } 1 \leq t \leq m\}$ . (Note that $d_{\varepsilon}$ is well-defined, since $\lim_{\delta \to 0^+} \omega_{f^{[t]}}(\delta) = 0$ for each $t$ .) Furthermore, we could choose this $W$ to satisfy either (i) or (ii), where (i), (ii) are conditions on $W$ as described in Theorem 3.1.
95
+
96
+ Theorem 3.3. Let $f \in \mathcal{C}(X, \mathbb{R}^m)$ , and suppose that $\sigma \in \mathcal{C}(\mathbb{R}) \backslash \mathcal{P}(\mathbb{R})$ . Then there exists $\lambda > 0$ (which depends only on $f$ and $\sigma$ ) such that for every $\varepsilon > 0$ , there exists an integer $N$ (independent of $m$ ) such that the following holds:
97
+
98
+ Let $W \in \mathcal{W}_N$ such that each $\widehat{\mathbf{w}}_j^{(1)} \in \mathbb{R}^n$ (for $1 \leq j \leq N$ ) is chosen uniformly at random from the set $\{\mathbf{u} \in \mathbb{R}^n : \| \mathbf{u} \|_2 > \lambda\}$ . Then, with probability 1, there exist choices for the bias weights $w_{0,j}^{(1)}$ (for $1 \leq j \leq N$ ) in the first layer, and (both bias and non-bias) weights $w_{i,j}^{(2)}$ in the second layer, such that $\| f - \rho_W^\sigma \|_{\infty, X} < \varepsilon$ .
99
+
100
+ Moreover, $N\in \mathcal{O}(\varepsilon^{-n})$ for general $f\in \mathcal{C}(X,\mathbb{R}^m)$ , and we can let $N = \binom{n + d}{d}$ if $f\in \mathcal{P}_{\leq d}(X,\mathbb{R}^m)$ .
101
+
102
+ # 4 DISCUSSION
103
+
104
+ The universal approximation theorem (version of Leshno et al. (1993)) is an immediate consequence of Theorem 3.2 and the observation that $\sigma$ must be non-polynomial for the UAP to hold, which follows from the fact that the uniform closure of $\mathcal{P}_{\leq d}(X)$ is $\mathcal{P}_{\leq d}(X)$ itself, for every integer $d \geq 1$ . Alternatively, we could infer the universal approximation theorem by applying the Stone-Weierstrass theorem (Theorem 2.1) to Theorem 3.1.
105
+
106
+ Given fixed $n, m, d$ , a compact set $X \subseteq \mathbb{R}^n$ , and $\sigma \in \mathcal{C}(\mathbb{R}) \backslash \mathcal{P}_{\leq d - 1}(\mathbb{R})$ , Theorem 3.1 says that we could use a fixed number $N$ of hidden units (independent of $\varepsilon$ ) and still be able to approximate any function $f \in \mathcal{P}_{\leq d}(X, \mathbb{R}^m)$ to any desired approximation error threshold $\varepsilon$ . Our $\varepsilon$ -free bound, although possibly surprising to some readers, is not the first instance of an $\varepsilon$ -free bound: Neural networks with two hidden layers of sizes $2n + 1$ and $4n + 3$ respectively are able to uniformly approximate any $f \in \mathcal{C}(X)$ , provided that we use a (somewhat pathological) activation function (Maiorov
107
+
108
+ & Pinkus, 1999); cf. (Pinkus, 1999). Lin et al. (2017) showed that for fixed $n$ , $d$ , and a fixed smooth non-linear $\sigma$ , there is a fixed $N$ (i.e. $\varepsilon$ -free), such that a neural network with $N$ hidden units is able to approximate any $f \in \mathcal{P}_{\leq d}(X)$ . An explicit expression for $N$ is not given, but we were able to infer from their constructive proof that $N = 4\binom{n+d+1}{d} - 4$ hidden units are required, over $d-1$ hidden layers (for $d \geq 2$ ). In comparison, we require fewer hidden units and a single hidden layer.
109
+
110
+ Our proof of Theorem 3.2 is an application of Jackson's theorem (Theorem 2.2) to Theorem 3.1, which gives an explicit bound in terms of the values of the modulus of continuity $\omega_{f}$ of the function $f$ to be approximated. The moduli of continuity of several classes of continuous functions have explicit characterizations. For example, given constants $k > 0$ and $0 < \alpha \leq 1$ , recall that a continuous function $f:X\to \mathbb{R}$ (for compact $X\subseteq \mathbb{R}^n$ ) is called $k$ -Lipschitz if $|f(x) - f(y)|\leq k\| x - y\|$ for all $x,y\in X$ , and it is called $\alpha$ -Hölder if there is some constant $c$ such that $|f(x) - f(y)|\leq c\| x - y\|^{\alpha}$ for all $x,y\in X$ . The modulus of continuity of a $k$ -Lipschitz (resp. $\alpha$ -Hölder) continuous function $f$ is $\omega_{f}(t) = kt$ (resp. $\omega_{f}(t) = ct^{\alpha}$ ), hence Theorem 3.2 implies the following corollary.
111
+
112
+ Corollary 4.1. Suppose $\sigma \in \mathcal{C}(\mathbb{R})\backslash \mathcal{P}(\mathbb{R})$ .
113
+
114
+ (i) If $f:[0,1]^n \to \mathbb{R}$ is $k$ -Lipschitz continuous, then for every $\varepsilon > 0$ , there exists some $W \in \mathcal{W}_N$ that satisfies $\| f - \rho_W^\sigma \|_{\infty, [0,1]^n} < \varepsilon$ , where $N = \binom{n + \lceil \frac{3k}{\varepsilon} \rceil}{n}$ .
115
+ (ii) If $f:[0,1]^n \to \mathbb{R}$ is $\alpha$ -Hölder continuous, then there is a constant $k$ such that for every $\varepsilon > 0$ , there exists some $W \in \mathcal{W}_N$ that satisfies $\| f - \rho_W^\sigma \|_{\infty, [0,1]^n} < \varepsilon$ , where $N = \binom{n+d}{d}$ , and $d = \lceil \frac{1}{2} \left( \frac{k}{\varepsilon} \right)^{1/\alpha} \rceil$ .
116
+
117
+ An interesting consequence of Theorem 3.3 is the following: The freezing of lower layers of a neural network, even in the extreme case that all frozen layers are randomly initialized and the last layer is the only "non-frozen" layer, does not necessarily reduce the representability of the resulting model. Specifically, in the single-hidden-layer case, we have shown that if the non-bias weights in the first layer are fixed and randomly chosen from some suitable fixed range, then the UAP holds with probability 1, provided that there are sufficiently many hidden units. Of course, this representability does not reveal anything about the learnability of such a model. In practice, layers are already pre-trained before being frozen. It would be interesting to understand quantitatively the difference between having pre-trained frozen layers and having randomly initialized frozen layers.
118
+
119
+ Theorem 3.3 can be viewed as a result on random features, which were formally studied in relation to kernel methods (Rahimi & Recht, 2007). In the case of ReLU activation functions, Sun et al. (2019) proved an analog of Theorem 3.3 for the approximation of functions in a reproducing kernel Hilbert space; cf. (Rahimi & Recht, 2008). For a good discussion on the role of random features in the representability of neural networks, see (Yehudai & Shamir, 2019).
120
+
121
+ The UAP is also studied in other contexts, most notably in relation to the depth and width of neural networks. Lu et al. (2017) proved the UAP for neural networks with hidden layers of bounded width, under the assumption that ReLU is used as the activation function. Soon after, Hanin (2017) strengthened the bounded-width UAP result by considering the approximation of continuous convex functions. Recently, the role of depth in the expressive power of neural networks has gathered much interest (Delalleau & Bengio, 2011; Eldan & Shamir, 2016; Mhaskar et al., 2017; Montúfar et al., 2014; Telgarsky, 2016). We do not address depth in this paper, but we believe it is possible that our results can be applied iteratively to deeper neural networks, perhaps in particular for the approximation of compositional functions; cf. (Mhaskar et al., 2017).
122
+
123
+ # 5 AN ALGEBRAIC APPROACH FOR PROVING UAP
124
+
125
+ We begin with a "warm-up" result. Subsequent results, even if they seem complicated, are actually multivariate extensions of this "warm-up" result, using very similar ideas.
126
+
127
+ Theorem 5.1. Let $p(x)$ be a real polynomial of degree $d$ with all-non-zero coefficients, and let $a_1, \ldots, a_{d+1}$ be real numbers. For each $1 \leq j \leq d+1$ , define $f_j : \mathbb{R} \to \mathbb{R}$ by $x \mapsto p(a_jx)$ . Then $f_1, \ldots, f_{d+1}$ are linearly independent if and only if $a_1, \ldots, a_{d+1}$ are distinct.
128
+
129
+ Proof. For each $0 \leq i, k \leq d$ and each $1 \leq j \leq d + 1$ , let $f_{j}^{(i)}$ (resp. $p^{(i)}$ ) be the $i$ -th derivative of $f_{j}$ (resp. $p$ ), and let $\alpha_{k}^{(i)}$ be the coefficient of $x^{k}$ in $p^{(i)}(x)$ . Recall that the Wronskian of $(f_{1}, \ldots, f_{d + 1})$ is defined to be the determinant of the matrix $M(x) \coloneqq [f_{j}^{(i - 1)}(x)]_{1 \leq i, j \leq d + 1}$ . Since $f_{1}, \ldots, f_{d + 1}$ are
130
+
131
+ polynomial functions, it follows that $(f_{1},\ldots ,f_{d + 1})$ is a sequence of linearly independent functions if and only if its Wronskian is not the zero function (LeVeque, 1956, Thm. 4.7(a)). Clearly, if $a_{i} = a_{j}$ for $i\neq j$ , then $\det M(x)$ is identically zero. Thus, it suffices to show that if $a_1,\dots ,a_{d + 1}$ are distinct, then the evaluation $\operatorname *{det}M(1)$ of this Wronskian at $x = 1$ gives a non-zero value.
132
+
133
+ Now, the $(i,j)$ -th entry of $M(1)$ equals $a_{j}^{i-1}p^{(i-1)}(a_{j})$ , so $M(1) = M'M''$ , where $M'$ is an upper triangular matrix whose $(i,j)$ -th entry equals $\alpha_{j-i}^{(i-1)}$ , and $M'' = [a_{j}^{i-1}]_{1 \leq i,j \leq d+1}$ is the transpose of a Vandermonde matrix, whose determinant is
134
+
135
+ $$
136
+ \det (M ^ {\prime \prime}) = \prod_ {1 \leq i < j \leq d + 1} (a _ {j} - a _ {i}).
137
+ $$
138
+
139
+ Note that the $k$ -th diagonal entry of $M'$ is $\alpha_0^{(k-1)} = (k-1)! \alpha_{k-1}^{(0)}$ , which is non-zero by assumption, so $\det(M') \neq 0$ . Thus, if $a_1, \ldots, a_{d+1}$ are distinct, then $\det M(1) = \det(M') \det(M'') \neq 0$ .
140
+
141
+ Definition 5.2. Given $N \geq 1$ , $W \in \mathcal{W}_N^{n,m}$ , $x_0 \in \mathbb{R}^n$ , and any function $g: \mathbb{R} \to \mathbb{R}$ , let $\mathcal{F}_{g,x_0}(W)$ denote the sequence of functions $(f_1, \ldots, f_N)$ , such that each $f_j: \mathbb{R}^n \to \mathbb{R}$ is defined by the map $x \mapsto g(\widehat{\mathbf{w}}_j^{(1)} \cdot (x - x_0))$ . Also, define the set
142
+
143
+ $$
144
{}^{g}\mathcal{W}_{n,N;x_{0}}^{\mathrm{ind}} := \left\{ W \in \mathcal{W}_{N}^{n,m} : \mathcal{F}_{g,x_{0}}(W) \text{ is linearly independent} \right\}.
145
+ $$
146
+
147
+ Note that the value of $m$ is irrelevant for defining ${}^g\mathcal{W}_{n,N;x_0}^{\mathrm{ind}}$ .
148
+
149
+ Remark 5.3. Given $\mathbf{a} = (a_1, \ldots, a_n) \in \mathbb{R}^n$ , consider the ring automorphism $\varphi: \mathcal{P}(\mathbb{R}^n) \to \mathcal{P}(\mathbb{R}^n)$ induced by $x_i \mapsto x_i - a_i$ for all $1 \leq i \leq n$ . For any $f_1, \ldots, f_k \in \mathcal{P}(\mathbb{R}^n)$ and scalars $\alpha_1, \ldots, \alpha_k \in \mathbb{R}$ , note that $\alpha_1 f_1 + \dots + \alpha_k f_k = 0$ if and only if $\alpha_1 \varphi(f_1) + \dots + \alpha_k \varphi(f_k) = 0$ , thus linear independence is preserved under $\varphi$ . Consequently, if the function $g$ in Definition 5.2 is polynomial, then ${}^g \mathcal{W}_{n,N; x_0}^{\mathrm{ind}} = {}^g \mathcal{W}_{n,N; 0_n}^{\mathrm{ind}}$ for all $x_0 \in \mathbb{R}^n$ .
150
+
151
+ Corollary 5.4. Let $m$ be arbitrary. If $p \in \mathcal{P}_d(\mathbb{R})$ has all-non-zero coefficients, then ${}^p\mathcal{W}_{1,d+1;0}^{\mathrm{ind}}$ is a dense subset of $\mathcal{W}_{d+1}^{1,m}$ (in the Euclidean metric).
152
+
153
+ Proof. For all $1 \leq j < j' \leq d+1$ , let $\mathcal{A}_{j,j'} := \{W \in \mathcal{W}_{d+1}^{1,m} : w_{1,j'}^{(1)} - w_{1,j}^{(1)} \neq 0\}$ , and note that $\mathcal{A}_{j,j'}$ is dense in $\mathcal{W}_{d+1}^{1,m}$ . So by Theorem 5.1, ${}^p\mathcal{W}_{1,d+1;0}^{\mathrm{ind}} = \bigcap_{1 \leq j < j' \leq d+1} \mathcal{A}_{j,j'}$ is also dense in $\mathcal{W}_{d+1}^{1,m}$ .
154
+
155
+ As we have seen in the proof of Theorem 5.1, Vandermonde matrices play an important role. To extend this theorem (and Corollary 5.4) to the multivariate case, we need a generalization of the Vandermonde matrix as described in (D'Andrea & Tabera, 2009). (Other generalizations of the Vandermonde matrix exist in the literature.) First, define the sets
156
+
157
+ $$
158
+ \Lambda_ {\leq d} ^ {n} := \left\{\left(\alpha_ {1}, \dots , \alpha_ {n}\right) \in \mathbb {N} ^ {n}: \alpha_ {1} + \dots + \alpha_ {n} \leq d \right\};
159
+ $$
160
+
161
+ $$
162
+ \mathcal {M} _ {\leq d} ^ {n} := \left\{\left(x \mapsto x ^ {\alpha}\right) \in \mathcal {P} \left(\mathbb {R} ^ {n}\right): \alpha \in \Lambda_ {\leq d} ^ {n} \right\}.
163
+ $$
164
+
165
+ It is easy to show that $|\Lambda_{\leq d}^n| = \binom{n+d}{d}$ , and that the set $\mathcal{M}_{\leq d}^n$ of monomial functions forms a basis for $\mathcal{P}_{\leq d}(\mathbb{R}^n)$ . Sort the $n$ -tuples in $\Lambda_{\leq d}^n$ in colexicographic order, i.e. $(\alpha_1, \ldots, \alpha_n) < (\alpha_1', \ldots, \alpha_n')$ if and only if $\alpha_i < \alpha_i'$ for the largest index $i$ such that $\alpha_i \neq \alpha_i'$ . Let $\lambda_1 < \dots < \lambda_{\binom{n+d}{d}}$ denote all the $\binom{n+d}{d}$ $n$ -tuples in $\Lambda_{\leq d}^n$ after sorting. Analogously, let $q_1, \ldots, q_{\binom{n+d}{d}}$ be all the monomial functions in $\mathcal{M}_{\leq d}^n$ in this order, i.e. each $q_k: \mathbb{R}^n \to \mathbb{R}$ is given by the map $x \mapsto x^{\lambda_k}$ . Given any sequence $(v_1, \ldots, v_{\binom{n+d}{d}})$ of vectors in $\mathbb{R}^n$ , the generalized Vandermonde matrix associated to it is
166
+
167
+ $$
168
+ Q = Q \left[ v _ {1}, \dots , v _ {\binom {n + d} {d}} \right] := \left[ q _ {i} \left(v _ {j}\right) \right] _ {1 \leq i, j \leq \binom {n + d} {d}}. \tag {2}
169
+ $$
170
+
171
+ Definition 5.5. Given any $W \in \mathcal{W}_{\binom{n+d}{d}}^{n,m}$ , we define the non-bias Vandermonde matrix of $W$ to be the generalized Vandermonde matrix $Q[W] := [q_i(\widehat{\mathbf{w}}_j^{(1)})]_{1 \leq i,j \leq \binom{n+d}{d}}$ associated to $(\widehat{\mathbf{w}}_1^{(1)}, \ldots, \widehat{\mathbf{w}}_{\binom{n+d}{d}}^{(1)})$ .
172
+
173
+ Theorem 5.6. Let $m$ be arbitrary, let $p \in \mathcal{P}_d(\mathbb{R}^n)$ , and suppose that $p$ has all-non-zero coefficients. Also, suppose that $p_1, \ldots, p_k \in \mathcal{P}(\mathcal{W}_{\binom{n+d}{d}}^{n,m})$ are fixed polynomial functions on the non-bias weights of the first layer. Define the following sets:
174
+
175
+ $$
176
\mathcal{U} := \left\{ W \in \mathcal{W}_{\binom{n+d}{d}}^{n,m} : p_{i}(W) = 0 \text{ for all } 1 \leq i \leq k \right\};
177
+ $$
178
+
179
+ $$
180
{}^{p}\mathcal{U}^{\mathrm{ind}} := \left\{ W \in \mathcal{U} : \mathcal{F}_{p,\mathbf{0}_{n}}(W) \text{ is linearly independent} \right\}.
181
+ $$
182
+
183
+ If there exists $W \in \mathcal{U}$ such that the non-bias Vandermonde matrix of $W$ is non-singular, then ${}^p\mathcal{U}^{\mathrm{ind}}$ is dense in $\mathcal{U}$ (w.r.t. the Euclidean metric).
184
+
185
+ Proof. We essentially extend the ideas in the proofs of Theorem 5.1 and Corollary 5.4, using the notion of generalized Wronskians; see Appendix A.1 for proof details. $\square$
186
+
187
+ Corollary 5.7. Let $m$ be arbitrary. If $p \in \mathcal{P}(\mathbb{R})$ is a fixed polynomial function of degree $d$ with all non-zero coefficients, then ${}^p\mathcal{W}_{n,\binom{n+d}{d};0_n}^{\mathrm{ind}}$ is a dense subset of $\mathcal{W}_{\binom{n+d}{d}}^{n,m}$ .
188
+
189
+ Proof. By Theorem 5.6, it suffices to show that there is some $W \in \mathcal{W}_{\binom{n+d}{d}}$ such that the non-bias Vandermonde matrix of $W$ is non-singular; see Appendix A.2 for proof details.
190
+
191
+ Remark 5.8. The proof of Corollary 5.7 still holds even if we restrict every non-bias weight $w_{i,j}^{(1)}$ in the first layer to satisfy $|w_{i,j}^{(1)}| < \lambda$ for some fixed constant $\lambda > 0$ .
192
+
193
+ For the rest of this section, let $\{\lambda_k\}_{k\in \mathbb{N}}$ be a divergent increasing sequence of positive real numbers, and let $\{Y_{k}\}_{k\in \mathbb{N}}$ be a sequence of closed intervals of $\mathbb{R}$ , such that $Y_{k^{\prime}}\subseteq Y_{k}$ whenever $k^{\prime}\leq k$ , and such that each interval $Y_{k} = [y_{k}^{\prime},y_{k}^{\prime \prime}]$ has length $\lambda_{k}$ . Let $d\geq 1$ be an integer, and suppose $\sigma \in \mathcal{C}(\mathbb{R})$ . For each $k\in \mathbb{N}$ , let $\sigma_{k}$ be the best polynomial approximant to $\sigma |_{Y_k}$ of degree $d$ . Given $r > 0$ and any integer $N\geq 1$ , define the closed ball $B_r^N \coloneqq \{x\in \mathbb{R}^N:\| x\| _2\leq r\}$ .
194
+
195
+ Lemma 5.9. If $d \geq 2$ , $\lim_{k \to \infty} E_d(\sigma|_{Y_k}) = \infty$ , and $\lambda_k \in \Omega(k^\gamma)$ for some $\gamma > 0$ , then for every $\varepsilon > 0$ , there is a subsequence $\{k_t\}_{t \in \mathbb{N}}$ of $\mathbb{N}$ , and a sequence $\{y_{k_t}\}_{t \in \mathbb{N}}$ of real numbers, such that $y_{k_t}' < y_{k_t} < y_{k_t}''$ , $\sigma(y_{k_t}) = \sigma_{k_t}(y_{k_t})$ , and
196
+
197
+ $$
198
+ \frac {\operatorname* {m i n} \left\{\left| y _ {k _ {t}} - y _ {k _ {t}} ^ {\prime} \right| , \left| y _ {k _ {t}} - y _ {k _ {t}} ^ {\prime \prime} \right| \right\}}{\left| y _ {k _ {t}} ^ {\prime} - y _ {k _ {t}} ^ {\prime \prime} \right|} > \frac {1}{d + 1} - \varepsilon ,
199
+ $$
200
+
201
+ for all $t\in \mathbb{N}$ . (See Appendix B for proof details.)
202
+
203
+ The proofs of the next three lemmas can be found in Appendix C.
204
+
205
+ Lemma 5.10. For any constant $\gamma > 0$ ,
206
+
207
+ $$
208
+ \lim _ {k \rightarrow \infty} \frac {\left\| \sigma_ {k} - \sigma \right\| _ {\infty , Y _ {k}}}{\left(\lambda_ {k}\right) ^ {1 + \gamma}} = 0.
209
+ $$
210
+
211
+ Lemma 5.11. Let $K \geq N \geq 1$ be integers, let $r_0, \ldots, r_N \geq 1$ be fixed real numbers, and let $S(\lambda)$ be a set $\{p_0(\lambda), \ldots, p_N(\lambda)\}$ of $N + 1$ affinely independent points in $\mathbb{R}^K$ , parametrized by $\lambda > 0$ , where each point $p_i(\lambda)$ has (Cartesian) coordinates $(\lambda^{r_i}p_{i,1}, \ldots, \lambda^{r_i}p_{i,K})$ for some fixed non-zero scalars $p_{i,1}, \ldots, p_{i,K}$ . Let $\Delta(\lambda)$ be the convex hull of $S(\lambda)$ , i.e. $\Delta(\lambda)$ is an $N$ -simplex, and for each $0 \leq i \leq N$ , let $h_i(\lambda)$ be the height of $\Delta(\lambda)$ w.r.t. apex $p_i(\lambda)$ . Let $h(\lambda) := \max\{h_i(\lambda) : 0 \leq i \leq N\}$ and $r_{\min} := \min\{r_1, \ldots, r_N\}$ . If $r_j > r_{\min}$ for some $0 \leq j \leq N$ , then there exists some $\gamma > 0$ such that $h(\lambda) \in \Omega(\lambda^{r_{\min} + \gamma})$ .
212
+
213
+ Lemma 5.12. Let $M, N \geq 1$ be integers, let $\tau > 0$ , and let $0 < \theta < 1$ . Suppose $\varphi: \mathbb{R}^M \to \mathbb{R}^N$ is a continuous open map such that $\varphi(\mathbf{0}_M) = \mathbf{0}_N$ , and $\varphi(\lambda x) \geq \lambda \varphi(x)$ for all $x \in \mathbb{R}^M$ , $\lambda > 0$ . Let $\{U_k\}_{k \in \mathbb{N}}$ be a sequence where each $U_k$ is a dense subspace of $B_{\lambda_k}^M \setminus B_{\theta \lambda_k}^M$ . Then for every $\delta > 0$ , there exists some (sufficiently large) $k \in \mathbb{N}$ , and some points $u_0, \ldots, u_N$ in $U_k$ , such that for each point $p \in B_{\tau}^N$ , there are scalars $b_0, \ldots, b_N \geq 0$ satisfying $p = \sum_{i=0}^{N} b_i \varphi(u_i)$ , $b_0 + \cdots + b_N = 1$ , and $|b_i - \frac{1}{N}| < \delta$ for all $0 \leq i \leq N$ .
214
+
215
+ Outline of strategy for proving Theorem 3.1. The first crucial insight is that $\mathcal{P}_{\leq d}(\mathbb{R}^n)$ , as a real vector space, has dimension $\binom{n+d}{d}$ . Our strategy is to consider $N = \binom{n+d}{d}$ hidden units. Every hidden unit represents a continuous function $g_j: X \to \mathbb{R}$ determined by its weights $W$ and the activation function $\sigma$ . If $g_1, \ldots, g_N$ can be well-approximated (on $X$ ) by linearly independent polynomial functions in $\mathcal{P}_{\leq d}(\mathbb{R}^n)$ , then we can choose suitable linear combinations of these $N$ functions to approximate all coordinate functions $f^{[t]}$ (independent of how large $m$ is). To approximate each $g_j$ , we consider a suitable sequence $\{\sigma_{\lambda_k}\}_{k=1}^{\infty}$ of degree $d$ polynomial approximations to $\sigma$ , so that $g_j$ is approximated by a sequence of degree $d$ polynomial functions $\{\widehat{g}_{j,k}^W\}_{k=1}^{\infty}$ . We shall also vary $W$ concurrently with $k$ , so that $\|\widehat{\mathbf{w}}_j^{(1)}\|_2$ increases together with $k$ . By Corollary 5.7, the weights can always be chosen so that $\widehat{g}_{1,k}^W, \ldots, \widehat{g}_{N,k}^W$ are linearly independent.
216
+
217
+ The second crucial insight is that every function in $\mathcal{P}_{\leq d}(\mathbb{R}^n)$ can be identified geometrically as a point in Euclidean $\binom{n+d}{d}$ -space. We shall choose the bias weights so that $\widehat{g}_{1,k}^{W}, \ldots, \widehat{g}_{N,k}^{W}$ correspond
218
+
219
+ to points on a hyperplane, and we shall consider the barycentric coordinates of the projections of both $f^{[t]}$ and the constant function onto this hyperplane, with respect to $\widehat{g}_{1,k}^{W},\ldots ,\widehat{g}_{N,k}^{W}$ . As the values of $k$ and $\| \widehat{\mathbf{w}}_j^{(1)}\| _2$ increase, both projection points have barycentric coordinates that approach $(\frac{1}{N},\dots,\frac{1}{N})$ , and their difference approaches 0; cf. Lemma 5.12. This last observation, in particular, when combined with Lemma 5.9 and Lemma 5.10, is a key reason why the minimum number $N$ of hidden units required for the UAP to hold is independent of the approximation error threshold $\varepsilon$ .
220
+
221
+ Proof of Theorem 3.1. Fix some $\varepsilon >0$ , and for brevity, let $N = \binom{n+d}{d}$ . Theorem 3.1 is trivially true when $f$ is constant, so assume $f$ is non-constant. Fix a point $x_0\in X$ , and define $f_{\mathbf{0}}\in \mathcal{C}(X,\mathbb{R}^{m})$ by $f_{\mathbf{0}}^{[t]}\coloneqq f^{[t]} - f^{[t]}(x_0)$ for all $1\leq t\leq m$ . Next, let $r_X(x_0)\coloneqq \sup \{\| x - x_0\| _2:x\in X\}$ , and note that $r_X(x_0) < \infty$ , since $X$ is compact. By replacing $X$ with a closed tubular neighborhood of $X$ if necessary, we may assume without loss of generality that $r_X(x_0) > 0$ .
222
+
223
+ Define $\{\lambda_k\}_{k\in \mathbb{N}},\{Y_k\}_{k\in \mathbb{N}}$ and $\{\sigma_k\}_{k\in \mathbb{N}}$ as before, with an additional condition that $\lambda_{k}\in \Omega (k^{\tau})$ for some $\tau >0$ . Assume without loss of generality that there exists a sequence $\{y_k\}_{k\in \mathbb{N}}$ of real numbers, such that $y_{k}^{\prime} < y_{k} < y_{k}^{\prime \prime},\sigma (y_{k}) = \sigma_{k}(y_{k})$ , and
224
+
225
+ $$
226
+ \frac{\min\left\{\left|y_k - y_k'\right|, \left|y_k - y_k''\right|\right\}}{\lambda_k} = \frac{\min\left\{\left|y_k - y_k'\right|, \left|y_k - y_k''\right|\right\}}{\left|y_k' - y_k''\right|} > \frac{1}{d + 2}, \tag{3}
227
+ $$
228
+
229
+ for all $k \in \mathbb{N}$ . The validity of this assumption in the case $\lim_{k \to \infty} E_d(\sigma|_{Y_k}) = \infty$ is given by Lemma 5.9. If instead $\lim_{k \to \infty} E_d(\sigma|_{Y_k}) < \infty$ , then as $k \to \infty$ , the sequence $\{\sigma_k\}_{k \in \mathbb{N}}$ converges to some $\widehat{\sigma} \in \mathcal{P}_{\leq d}(\mathbb{R})$ . Hence, the assumption is also valid in this case, since for any $\widehat{y} \in \mathbb{R}$ such that $\sigma(\widehat{y}) = \widehat{\sigma}(\widehat{y})$ , we can always choose $\{Y_k\}_{k \in \mathbb{N}}$ to satisfy $\frac{y_k' + y_k''}{2} = \widehat{y}$ for all $k \in \mathbb{N}$ , which then allows us to choose $\{y_k\}_{k \in \mathbb{N}}$ that satisfies $\lim_{k \to \infty} \frac{\min\{|y_k - y_k'|, |y_k - y_k''|\}}{\lambda_k} = \frac{1}{2} > \frac{1}{d + 2}$ .
230
+
231
+ By Lemma 5.10, we may further assume that $\| \sigma_k - \sigma \|_{\infty ,Y_k} < \frac{\varepsilon(\lambda_k)^{1 + \gamma}}{C}$ for all $k\in \mathbb{N}$ , where $C > 0$ and $\gamma >0$ are constants whose precise definitions we give later. Also, for any $W\in \mathcal{W}_N^{n,m}$ , we can choose $\sigma^{\prime}\in \mathcal{C}(\mathbb{R})$ that is arbitrarily close to $\sigma$ in the uniform metric, such that $\| \rho_W^\sigma -\rho_W^{\sigma '}\|_{\infty ,X}$ is arbitrarily small. Since $\sigma \in \mathcal{C}(\mathbb{R})\backslash \mathcal{P}_{\leq d - 1}(\mathbb{R})$ by assumption, we may hence perturb $\sigma$ if necessary, and assume without loss of generality that every $\sigma_{k}$ is a polynomial of degree $d$ with all-non-zero coefficients, such that $\sigma_k(y_k)\neq 0$ .
232
+
233
+ For every $r > 0$ and $k\in \mathbb{N}$ , let $\mathcal{W}_r^\prime \coloneqq \{W\in \mathcal{W}_N^{n,m}:\| \widehat{\mathbf{w}}_j^{(1)}\| _2\leq r$ for all $1\leq j\leq N\}$ , and define
234
+
235
+ $$
236
+ \lambda_k' := \sup \left\{ r > 0 : \left\{ y_k + \widehat{\mathbf{w}}_j^{(1)} \cdot (x - x_0) \in \mathbb{R} : x \in X, W \in \mathcal{W}_r' \right\} \subseteq Y_k \text{ for all } 1 \leq j \leq N \right\}.
237
+ $$
238
+
239
+ Each $\lambda_k^\prime$ is well-defined, since $r_X(x_0) < \infty$ . Note also that $\lambda_k^\prime r_X(x_0) = \min \{|y_k - y_k^\prime |,|y_k - y_k^{\prime \prime}|\}$ by definition, hence it follows from (3) that $\frac{\lambda_k}{\lambda_k'} < (d + 2)r_X(x_0)$ . In particular, $\{\lambda_k^\prime \}_{k\in \mathbb{N}}$ is a divergent increasing sequence of positive real numbers.
240
+
241
+ Given any $p \in \mathcal{P}_{\leq d}(\mathbb{R}^n)$ , let $\nu(p) \in \mathbb{R}^N$ denote the vector of coefficients with respect to the basis $\{q_1(x - x_0), \ldots, q_N(x - x_0)\}$ (i.e. if $\nu(p) = (\nu_1, \ldots, \nu_N)$ , then $p(x) = \sum_{1 \leq i \leq N} \nu_i q_i(x - x_0)$ ), and let $\widehat{\nu}(p) \in \mathbb{R}^{N-1}$ be the truncation of $\nu(p)$ by removing the first coordinate. Note that $q_1(x)$ is the constant monomial, so this first coordinate $\nu_1$ is the coefficient of the constant term. For convenience, let $\nu_i(p)$ (resp. $\widehat{\nu}_i(p)$ ) be the $i$ -th entry of $\nu(p)$ (resp. $\widehat{\nu}(p)$ ).
242
+
243
+ For each $k\in \mathbb{N}$ , $W\in \mathcal{W}_{\lambda_k'}'$ , and $1\leq j\leq N$ , define functions $g_{j,k}^{W},\widehat{g}_{j,k}^{W}$ in $\mathcal{C}(X)$ by $x\mapsto \sigma (\mathbf{w}_j^{(1)}\cdot (1,x))$ and $x\mapsto \sigma_k(\mathbf{w}_j^{(1)}\cdot (1,x))$ respectively. By definition, $\nu_{i}(\widehat{g}_{j,k}^{W})$ can be treated as a function of $W$ , and note that $\nu_{i}(\widehat{g}_{j,k}^{\lambda W}) = \lambda^{\deg q_{i}}\nu_{i}(\widehat{g}_{j,k}^{W})$ for any $\lambda >0$ . (Here, $\deg q_{i}$ denotes the total degree of $q_{i}$ .) Since $\deg q_i = 0$ only if $i = 1$ , it then follows that $\widehat{\nu}_i(\widehat{g}_{j,k}^{\lambda W})\geq \lambda \widehat{\nu}_i(\widehat{g}_{j,k}^W)$ for all $\lambda >0$ .
244
+
245
+ For each $k \in \mathbb{N}$ , define the "shifted" function $\sigma_k': Y_k \to \mathbb{R}$ by $y \mapsto \sigma_k(y + y_k)$ . Next, let $\mathcal{W}_k'' := {}^{\sigma_k'}\mathcal{W}_{n,N;x_0}^{\mathrm{ind}} \cap (\mathcal{W}_{\lambda_k'}' \setminus \mathcal{W}_{0.5\lambda_k'}')$ , and suppose $W \in \mathcal{W}_k''$ . Note that in the definition of $\mathcal{W}_k''$ , we do not impose any restrictions on the bias weights. Thus, given any such $W$ , we could choose the bias weights of $W^{(1)}$ to be $w_{j,0}^{(1)} = y_k - \widehat{\mathbf{w}}_j^{(1)} \cdot x_0$ for all $1 \leq j \leq N$ . This implies that each $\widehat{g}_{j,k}^W$ represents the map $x \mapsto \sigma_k(\widehat{\mathbf{w}}_j^{(1)} \cdot (x - x_0) + y_k)$ , hence $\widehat{g}_{j,k}^W(x_0) = \sigma_k(y_k) = \sigma(y_k)$ . Consequently,
246
+
247
+ by the definitions of $Y_{k}$ and $\mathcal{W}_{\lambda_k'}'$ , we infer that
248
+
249
+ $$
250
+ \left\| g _ {j, k} ^ {W} - \widehat {g} _ {j, k} ^ {W} \right\| _ {\infty , X} < \frac {\varepsilon \left(\lambda_ {k}\right) ^ {1 + \gamma}}{C}. \tag {4}
251
+ $$
252
+
253
+ By Corollary 5.7 and Remark 5.3, $\mathcal{W}_k^{\prime\prime}$ is dense in $(\mathcal{W}_{\lambda_k'}^\prime \setminus \mathcal{W}_{0.5\lambda_k'}^\prime)$ , so such a $W$ exists (with its bias weights given as above). By the definition of $\sigma_k'\mathcal{W}_{n,N;x_0}^{\mathrm{ind}}$ , we infer that $\{\widehat{g}_{1,k}^{W},\ldots ,\widehat{g}_{N,k}^{W}\}$ is linearly independent and hence spans $\mathcal{P}_{\leq d}(X)$ . Thus, for every $1\leq t\leq m$ , there exist $a_{1,k}^{[t]},\ldots ,a_{N,k}^{[t]}\in \mathbb{R}$ which are uniquely determined once $k$ is fixed, such that $f_{\mathbf{0}}^{[t]} = a_{1,k}^{[t]}\widehat{g}_{1,k}^{W} + \dots +a_{N,k}^{[t]}\widehat{g}_{N,k}^{W}$ . Evaluating both sides of this equation at $x = x_0$ , we then get
254
+
255
+ $$
256
+ a _ {1, k} ^ {[ t ]} + \dots + a _ {N, k} ^ {[ t ]} = 0. \tag {5}
257
+ $$
258
+
259
+ For each $\ell \in \mathbb{R}$ , define the hyperplane $\mathcal{H}_{\ell} \coloneqq \{(u_1, \ldots, u_N) \in \mathbb{R}^N : u_1 = \ell\}$ . Recall that $q_1(x)$ is the constant monomial, so the first coordinate of each $\nu(\widehat{g}_{j,k}^W)$ equals $\sigma(y_k)$ , which implies that $\nu(\widehat{g}_{1,k}^W), \ldots, \nu(\widehat{g}_{N,k}^W)$ are $N$ points on $\mathcal{H}_{\sigma(y_k)} \cong \mathbb{R}^{N-1}$ . Let $c_f \coloneqq \max \{\|\widehat{\nu}(f^{[t]})\|_2 : 1 \leq t \leq m\}$ . (This is non-zero, since $f$ is non-constant.) Note that $\mathbf{0}_{N-1}$ and $\widehat{\nu}(f^{[t]})$ (for all $t$ ) are points in $B_{c_f}^{N-1}$ . So for any $\delta > 0$ , Lemma 5.12 implies that there exists some sufficiently large $k \in \mathbb{N}$ such that we can choose some $W \in \mathcal{W}_k''$ , so that there are non-negative scalars $b_{j,k}^{[t]}, b_{j,k}'$ (for $1 \leq j \leq N$ , $1 \leq t \leq m$ ) contained in the interval $(\frac{1}{N} - \delta, \frac{1}{N} + \delta)$ that satisfy the following:
260
+
261
+ $$
262
+ b_{1,k}^{[t]} + \dots + b_{N,k}^{[t]} = b_{1,k}' + \dots + b_{N,k}' = 1 \quad (\text{for all } 1 \leq t \leq m);
263
+ $$
264
+
265
+ $$
266
+ \mathbf{0}_{N-1} = \sum_{j=1}^{N} b_{j,k}' \, \widehat{\nu}(\widehat{g}_{j,k}^{W}); \quad \widehat{\nu}(f^{[t]}) = \sum_{j=1}^{N} b_{j,k}^{[t]} \, \widehat{\nu}(\widehat{g}_{j,k}^{W}) \quad (\text{for all } 1 \leq t \leq m).
267
+ $$
268
+
269
+ Note that $\nu(f_{\mathbf{0}}^{[t]} + \sigma(y_k)) = b_{1,k}^{[t]}\nu(\widehat{g}_{1,k}^W) + \dots + b_{N,k}^{[t]}\nu(\widehat{g}_{N,k}^W)$ and $(\mathbf{0}_{N-1}, \sigma(y_k)) = b_{1,k}'\nu(\widehat{g}_{1,k}^W) + \dots + b_{N,k}'\nu(\widehat{g}_{N,k}^W)$ , so we get
270
+
271
+ $$
272
+ f _ {\mathbf {0}} ^ {[ t ]} = \left(b _ {1, k} ^ {[ t ]} - b _ {1, k} ^ {\prime}\right) \widehat {g} _ {1, k} ^ {W} + \dots + \left(b _ {N, k} ^ {[ t ]} - b _ {N, k} ^ {\prime}\right) \widehat {g} _ {N, k} ^ {W}.
273
+ $$
274
+
275
+ Since $a_{1,k}^{[t]}, \ldots, a_{N,k}^{[t]}$ are unique (for fixed $k$ ), we infer that $a_{j,k}^{[t]} = b_{j,k}^{[t]} - b_{j,k}'$ for each $1 \leq j \leq N$ . Thus, for this sufficiently large $k$ , it follows from $b_{j,k}^{[t]}, b_{j,k}' \in (\frac{1}{N} - \delta, \frac{1}{N} + \delta)$ that
276
+
277
+ $$
278
+ a _ {j, k} ^ {[ t ]} \geq \left(\frac {1}{N} - \delta\right) - \left(\frac {1}{N} + \delta\right) \geq - 2 \delta . \tag {6}
279
+ $$
280
+
281
+ Let $S_{k} \coloneqq \{\widehat{\nu}(\widehat{g}_{1,k}^{W}), \ldots, \widehat{\nu}(\widehat{g}_{N,k}^{W})\}$ , let $\Delta_{k}$ be the convex hull of $S_{k}$ , and for each $j$ , let $h_{j}(\Delta_{k})$ be the height of $\Delta_{k}$ w.r.t. apex $\widehat{\nu}(\widehat{g}_{j,k}^{W})$ . Let $h(\Delta_{k}) \coloneqq \max \{h_{j}(\Delta_{k}): 1 \leq j \leq N\}$ . Since $\widehat{\nu}_{i}(\widehat{g}_{j,k}^{\lambda W}) = \lambda^{\deg q_{i}} \widehat{\nu}_{i}(\widehat{g}_{j,k}^{W})$ for all $i$ , and since $d \geq 2$ (i.e. $\deg q_{N} > 1$ ), it follows from Lemma 5.11 that there exists some $\gamma > 0$ such that $h(\Delta_{k}) \in \Omega((\lambda_{k}')^{1 + \gamma})$ . Using this particular $\gamma > 0$ , we infer that there exists some constant $0 < C' < \infty$ such that $\frac{(\lambda_{k}')^{1 + \gamma}}{h(\Delta_{k})} < C'$ for all sufficiently large $k$ .
282
+
283
+ Note that $2\delta$ is an upper bound of the normalized difference for each barycentric coordinate of the two points $\widehat{\nu}(f^{[t]})$ and $\mathbf{0}_{N-1}$ (contained in $B_{c_f}^{N-1}$ ), which satisfies
284
+
285
+ $$
286
+ 2 \delta \leq \frac {c _ {f}}{h \left(\Delta_ {k}\right)} = \frac {c _ {f}}{\left(\lambda_ {k}\right) ^ {1 + \gamma}} \cdot \left(\frac {\lambda_ {k}}{\lambda_ {k} ^ {\prime}}\right) ^ {1 + \gamma} \cdot \frac {\left(\lambda_ {k} ^ {\prime}\right) ^ {1 + \gamma}}{h \left(\Delta_ {k}\right)} < \frac {c _ {f}}{\left(\lambda_ {k}\right) ^ {1 + \gamma}} [ (d + 2) r _ {X} (x _ {0}) ] ^ {1 + \gamma} C ^ {\prime}. \tag {7}
287
+ $$
288
+
289
+ Now, define $C \coloneqq 2Nc_f[(d + 2)r_X(x_0)]^{1 + \gamma}C' > 0$ . Thus, for sufficiently large $k$ , it follows from (5), (6) and (7) that
290
+
291
+ $$
292
+ \left| a_{1,k}^{[t]} \right| + \dots + \left| a_{N,k}^{[t]} \right| \leq a_{1,k}^{[t]} + \dots + a_{N,k}^{[t]} + 4N\delta = 4N\delta \leq \frac{C}{\left(\lambda_k\right)^{1+\gamma}}. \tag{8}
293
+ $$
294
+
295
+ For this sufficiently large $k$ , define $g \in \mathcal{C}(X, \mathbb{R}^m)$ by $g^{[t]} = a_{1,k}^{[t]} g_{1,k}^{W} + \dots + a_{N,k}^{[t]} g_{N,k}^{W}$ for each $t$ . Using (4) and (8), it follows that
296
+
297
+ $$
298
+ \begin{array}{l} \| f _ {\mathbf {0}} ^ {[ t ]} - g ^ {[ t ]} \| _ {\infty , X} = \| a _ {1, k} ^ {[ t ]} (g _ {1, k} ^ {W} - \widehat {g} _ {1, k} ^ {W}) + \dots + a _ {N, k} ^ {[ t ]} (g _ {N, k} ^ {W} - \widehat {g} _ {N, k} ^ {W}) \| _ {\infty , X} \\ \leq | a _ {1, k} ^ {[ t ]} | \cdot \| g _ {1, k} ^ {W} - \widehat {g} _ {1, k} ^ {W} \| _ {\infty , X} + \dots + | a _ {N, k} ^ {[ t ]} | \cdot \| g _ {N, k} ^ {W} - \widehat {g} _ {N, k} ^ {W} \| _ {\infty , X} \\ < \varepsilon . \\ \end{array}
299
+ $$
300
+
301
+ Finally, for all $1 \leq t \leq m$ , let $w_{j,t}^{(2)} = a_{j,k}^{[t]}$ for each $1 \leq j \leq N$ , and let $w_{0,t}^{(2)} = f^{[t]}(x_0)$ . This gives $\rho_W^{\sigma[t]} = g^{[t]} + f^{[t]}(x_0)$ . Therefore, the identity $f^{[t]} = f_0^{[t]} + f^{[t]}(x_0)$ implies $\|f - \rho_W^\sigma\|_{\infty,X} < \varepsilon$ .
302
+
303
+ Notice that for all $\delta > 0$ , we showed in (6) that there is a sufficiently large $k$ such that $a_{j,k}^{[t]} \geq -2\delta$ . A symmetric argument yields $a_{j,k}^{[t]} \leq 2\delta$ . Thus, for all $\lambda > 0$ , we can choose $W$ so that all non-bias weights in $W^{(2)}$ are contained in the interval $(- \lambda, \lambda)$ ; this proves assertion (i) of the theorem.
304
+
305
+ Note also that we do not actually require $\delta > 0$ to be arbitrarily small. Suppose instead that we choose $k \in \mathbb{N}$ sufficiently large, so that the convex hull of $S_{k}$ contains $\mathbf{0}_{N-1}$ and $\widehat{\nu}(f^{[t]})$ (for all $t$ ). In this case, observe that our choice of $k$ depends only on $f$ (via $\widehat{\nu}(f^{[t]})$ ) and $\sigma$ (via the definition of $\{\lambda_{k}\}_{k \in \mathbb{N}}$ ). The inequality (7) still holds for any $\delta$ satisfying $b_{j,k}^{[t]}, b_{j,k}' \in \left(\frac{1}{N} - \delta, \frac{1}{N} + \delta\right)$ for all $j, t$ . Thus, our argument to show $\| f - \rho_{W}^{\sigma} \|_{\infty, X} < \varepsilon$ holds verbatim, which proves assertion (ii).
306
+
307
+ Proof of Theorem 3.2. Fix some $\varepsilon >0$ , and consider an arbitrary $t\in \{1,\ldots ,m\}$ . For each integer $d\geq 1$ , let $p_d^{[t]}$ be the best polynomial approximant to $f^{[t]}$ of degree $d$ . By Theorem 2.2, we have $\| f^{[t]} - p_d^{[t]}\|_{\infty ,X}\leq 6\omega_{f^{[t]}}(\frac{D}{2d})$ for all $d\geq 1$ , hence it follows from the definition of $d_{\varepsilon}$ that
308
+
309
+ $$
310
+ \left\| f ^ {[ t ]} - p _ {d _ {\varepsilon}} ^ {[ t ]} \right\| _ {\infty , X} \leq 6 \omega_ {f ^ {[ t ]}} \left(\frac {D}{2 d _ {\varepsilon}}\right) < \varepsilon .
311
+ $$
312
+
313
+ Define $\varepsilon' := \varepsilon - \max \{6\omega_{f^{[t]}}\left(\frac{D}{2d_{\varepsilon}}\right): 1 \leq t \leq m\}$ . Note that $\varepsilon' > 0$ , and $\| f^{[t]} - p_{d_{\varepsilon}}^{[t]}\|_{\infty, X} \leq \varepsilon - \varepsilon'$ (for all $1 \leq t \leq m$ ). By Theorem 3.1, there exists some $W \in \mathcal{W}_{\binom{n+d_{\varepsilon}}{d_{\varepsilon}}}$ satisfying $\| p_{d_{\varepsilon}}^{[t]} - \rho_W^{\sigma[t]}\|_{\infty, X} < \varepsilon'$ for all $1 \leq t \leq m$ , which implies
314
+
315
+ $$
316
+ \| f ^ {[ t ]} - \rho_ {W} ^ {\sigma [ t ]} \| _ {\infty , X} \leq \| f ^ {[ t ]} - p _ {d _ {\varepsilon}} ^ {[ t ]} \| _ {\infty , X} + \| p _ {d _ {\varepsilon}} ^ {[ t ]} - \rho_ {W} ^ {\sigma [ t ]} \| _ {\infty , X} < (\varepsilon - \varepsilon^ {\prime}) + \varepsilon^ {\prime} = \varepsilon ,
317
+ $$
318
+
319
+ therefore $\| f - \rho_W^\sigma \|_{\infty, X} < \varepsilon$ . Conditions (i) and (ii) follow from Theorem 3.1. Finally, note that $\omega_{f^{[t]}}\left(\frac{D}{2d}\right) \in \mathcal{O}\left(\frac{1}{d}\right)$ (for fixed $D$ ), i.e. $d_{\varepsilon} \in \mathcal{O}\left(\frac{1}{\varepsilon}\right)$ , hence $\binom{n+d_{\varepsilon}}{d_{\varepsilon}} = \frac{(n+d_{\varepsilon})(n+d_{\varepsilon}-1)\cdots(d_{\varepsilon}+1)}{n!} \in \mathcal{O}(\varepsilon^{-n})$ .
320
+
321
+ Proof of Theorem 3.3. Most of the work has already been done earlier in the proofs of Theorem 3.1 and Theorem 3.2. The key observation is that $\operatorname{det}(Q[W])$ is a non-zero polynomial in terms of the weights $W$ , hence $\{\operatorname{det}(Q[W]) \neq 0 : W \in \mathcal{W}_{\binom{n+d}{d}}\}$ is dense in $\mathcal{W}_{\binom{n+d}{d}}$ , or equivalently, its complement has Lebesgue measure zero.
322
+
323
+ # 6 CONCLUSION AND FURTHER REMARKS
324
+
325
+ Theorem 5.6 is rather general, and could potentially be used to prove analogs of the universal approximation theorem for other classes of neural networks, such as convolutional neural networks and recurrent neural networks. In particular, finding a single suitable set of weights (as a representative of the infinitely many possible sets of weights in the given class of neural networks), with the property that its corresponding "non-bias Vandermonde matrix" (see Definition 5.5) is non-singular, would serve as a straightforward criterion for showing that the UAP holds for the given class of neural networks (with certain weight constraints). We formulated this criterion to be as general as we could, with the hope that it would be applicable to future classes of "neural-like" networks.
326
+
327
+ We believe our algebraic approach could be emulated to eventually yield a unified understanding of how depth, width, constraints on weights, and other architectural choices, would influence the approximation capabilities of arbitrary neural networks.
328
+
329
+ Finally, we end our paper with an open-ended question. The proofs of our results in Section 5 seem to suggest that non-bias weights and bias weights play very different roles. We could impose very strong restrictions on the non-bias weights and still have the UAP. What about the bias weights?
330
+
331
+ # ACKNOWLEDGMENTS
332
+
333
+ This research is supported by the National Research Foundation, Singapore, under its NRFF program (NRFFAI1-2019-0005).
334
+
335
+ # REFERENCES
336
+
337
+ Raman Arora, Amitabh Basu, Poorya Mianjy, and Anirbit Mukherjee. Understanding deep neural networks with rectified linear units. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=B1J_rgWRW.
338
+ G. Cybenko. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems, 2(4):303-314, December 1989. doi: 10.1007/BF02551274. URL https://doi.org/10.1007/BF02551274.
339
+ Carlos D'Andrea and Luis Felipe Tabera. Tropicalization and irreducibility of generalized Vandermonde determinants. Proc. Amer. Math. Soc., 137(11):3647-3656, 2009. ISSN 0002-9939. doi: 10.1090/S0002-9939-09-09951-1. URL https://doi.org.library.sutd.edu.sg:2443/10.1090/S0002-9939-09-09951-1.
340
+ Olivier Delalleau and Yoshua Bengio. Shallow vs. deep sum-product networks. In J. Shawe-Taylor, R. S. Zemel, P. L. Bartlett, F. Pereira, and K. Q. Weinberger (eds.), Advances in Neural Information Processing Systems 24, pp. 666-674. Curran Associates, Inc., 2011. URL http://papers.nips.cc/paper/4350-shallow-vs-deep-sum-product-networks.pdf.
341
+ Ronald A. DeVore, Ralph Howard, and Charles Micchelli. Optimal nonlinear approximation. Manuscripta Math., 63(4):469-478, 1989. ISSN 0025-2611. doi: 10.1007/BF01171759. URL https://doi.org.library.sutd.edu.sg:2443/10.1007/BF01171759.
342
+ David Eisenbud. Commutative algebra, volume 150 of Graduate Texts in Mathematics. Springer-Verlag, New York, 1995. ISBN 0-387-94268-8; 0-387-94269-6. doi: 10.1007/978-1-4612-5350-1. URL http://dx.doi.org/10.1007/978-1-4612-5350-1. With a view toward algebraic geometry.
343
+ Ronen Eldan and Ohad Shamir. The power of depth for feedforward neural networks. In Vitaly Feldman, Alexander Rakhlin, and Ohad Shamir (eds.), 29th Annual Conference on Learning Theory, volume 49 of Proceedings of Machine Learning Research, pp. 907-940, Columbia University, New York, New York, USA, 23-26 Jun 2016. PMLR. URL http://proceedings.mlr.press/v49/eldan16.html.
344
+ K. Funahashi. On the approximate realization of continuous mappings by neural networks. Neural Netw., 2(3):183-192, May 1989. ISSN 0893-6080. doi: 10.1016/0893-6080(89)90003-8. URL http://dx.doi.org/10.1016/0893-6080(89)90003-8.
345
+ Boris Hanin. Universal function approximation by deep neural nets with bounded width and relu activations. 08 2017. Preprint arXiv:1708.02691 [stat.ML].
346
+ K. Hornik, M. Stinchcombe, and H. White. Multilayer feedforward networks are universal approximators. Neural Netw., 2(5):359-366, July 1989. ISSN 0893-6080. doi: 10.1016/0893-6080(89) 90020-8. URL http://dx.doi.org/10.1016/0893-6080(89)90020-8.
347
+ Kurt Hornik. Approximation capabilities of multilayer feedforward networks. Neural Networks, 4(2):251 - 257, 1991. ISSN 0893-6080. doi: https://doi.org/10.1016/0893-6080(91)90009-T. URL http://www.sciencedirect.com/science/article/pii/089360809190009T.
348
+ M. I. Kadec. On the distribution of points of maximum deviation in the approximation of continuous functions by polynomials. Uspehi Mat. Nauk, 15(1 (91)):199-202, 1960. ISSN 0042-1316.
349
+ M. I. Kadec. On the distribution of points of maximum deviation in the approximation of continuous functions by polynomials. Amer. Math. Soc. Transl. (2), 26:231-234, 1963. ISSN 0065-9290. doi: 10.1090/trans2/026/09. URL https://doi-org.library.sutd.edu.sg:2443/10.1090/trans2/026/09.
350
+ Moshe Leshno, Vladimir Ya. Lin, Allan Pinkus, and Shimon Schocken. Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural Networks, 6(6):861 - 867, 1993. ISSN 0893-6080. doi: https://doi.org/10.1016/S0893-6080(05)80131-5. URL http://www.sciencedirect.com/science/article/pii/S0893608005801315.
351
+ William Judson LeVeque. Topics in number theory. Vols. 1 and 2. Addison-Wesley Publishing Co., Inc., Reading, Mass., 1956.
352
+
353
+ Shiyu Liang and R. Srikant. Why deep neural networks? CoRR, abs/1610.04161, 2016. URL http://arxiv.org/abs/1610.04161.
354
+ Henry W. Lin, Max Tegmark, and David Rolnick. Why does deep and cheap learning work so well? Journal of Statistical Physics, 168(6):1223-1247, Sep 2017.
355
+ Zhou Lu, Hongming Pu, Feicheng Wang, Zhiqiang Hu, and Liwei Wang. The expressive power of neural networks: A view from the width. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems 30, pp. 6231-6239. Curran Associates, Inc., 2017. URL http://papers.nips.cc/paper/7203-the-expressive-power-of-neural-networks-a-view-from-the-width.pdf.
356
+ V. E. Maiorov and R. Meir. On the near optimality of the stochastic approximation of smooth functions by neural networks. Adv. Comput. Math., 13(1):79-103, 2000. ISSN 1019-7168. doi: 10.1023/A:1018993908478. URL https://doi.org.library.sutd.edu.sg:2443/10.1023/A:1018993908478.
357
+ Vitaly Maiorov and Allan Pinkus. Lower bounds for approximation by mlp neural networks. Neurocomputing, 25(1):81 - 91, 1999. ISSN 0925-2312. doi: https://doi.org/10.1016/S0925-2312(98)00111-8. URL http://www.sciencedirect.com/science/article/pii/S0925231298001118.
358
+ H. N. Mhaskar. Neural networks for optimal approximation of smooth and analytic functions. Neural Computation, 8(1):164-177, Jan 1996. doi: 10.1162/neco.1996.8.1.164.
359
+ Hrushikesh Mhaskar, Qianli Liao, and Tomaso Poggio. When and why are deep networks better than shallow ones?, 2017. URL https://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14849.
360
+ Guido Montúfar, Razvan Pascanu, Kyunghyun Cho, and Yoshua Bengio. On the number of linear regions of deep neural networks. In Proceedings of the 27th International Conference on Neural Information Processing Systems - Volume 2, NIPS'14, pp. 2924-2932, Cambridge, MA, USA, 2014. MIT Press. URL http://dl.acm.org/citation.cfm?id=2969033.2969153.
361
+ Allan Pinkus. Approximation theory of the mlp model in neural networks. ACTA NUMERICA, 8: 143-195, 1999.
362
+ A. Rahimi and B. Recht. Uniform approximation of functions with random bases. In 2008 46th Annual Allerton Conference on Communication, Control, and Computing, pp. 555-561, Sep. 2008. doi: 10.1109/ALLERTON.2008.4797607.
363
+ Ali Rahimi and Benjamin Recht. Random features for large-scale kernel machines. In Proceedings of the 20th International Conference on Neural Information Processing Systems, NIPS'07, pp. 1177-1184, USA, 2007. Curran Associates Inc. ISBN 978-1-60560-352-0. URL http://dl.acm.org/citation.cfm?id=2981562.2981710.
364
+ Theodore J. Rivlin. An introduction to the approximation of functions. Dover Publications, Inc., New York, 1981. ISBN 0-486-64069-8. Corrected reprint of the 1969 original, Dover Books on Advanced Mathematics.
365
+ Richard P. Stanley. Enumerative combinatorics. Vol. 2, volume 62 of Cambridge Studies in Advanced Mathematics. Cambridge University Press, Cambridge, 1999. ISBN 0-521-56069-1; 0-521-78987-7. doi: 10.1017/CBO9780511609589. URL https://doi.org.library.sutd.edu.sg: 2443/10.1017/CBO9780511609589. With a foreword by Gian-Carlo Rota and appendix 1 by Sergey Fomin.
366
+ Yitong Sun, Anna Gilbert, and Ambuj Tewari. On the approximation properties of random ReLU features. Preprint arXiv:1810.04374v3 [stat.ML], August 2019.
367
+ Matus Telgarsky. Benefits of depth in neural networks. In Vitaly Feldman, Alexander Rakhlin, and Ohad Shamir (eds.), 29th Annual Conference on Learning Theory, volume 49 of Proceedings of Machine Learning Research, pp. 1517-1539, Columbia University, New York, New York, USA, 23-26 Jun 2016. PMLR. URL http://proceedings.mlr.press/v49/telgarsky16.html.
368
+
369
+ K. Wolsson. Linear dependence of a function set of $m$ variables with vanishing generalized Wronskians. Linear Algebra Appl., 117:73-80, 1989. ISSN 0024-3795. doi: 10.1016/0024-3795(89) 90548-X. URL https://doi-org.library.sutd.edu.sg:2443/10.1016/0024-3795(89)90548-X.
370
+ Dmitry Yarotsky. Error bounds for approximations with deep relu networks. Neural Networks, 94:103 - 114, 2017. ISSN 0893-6080. doi: https://doi.org/10.1016/j.neunet.2017.07.002. URL http://www.sciencedirect.com/science/article/pii/S0893608017301545.
371
+ Gilad Yehudai and Ohad Shamir. On the power and limitations of random features for understanding neural networks. Preprint arXiv:1904.00687v2 [stat.ML], June 2019.
372
+
373
+ # A GENERALIZED WRONSKIANS AND THE PROOF OF THEOREM 5.6
374
+
375
+ First, we recall the notion of generalized Wronskians as given in (LeVeque, 1956, Chap. 4.3). Let $\Delta_0,\ldots ,\Delta_{N - 1}$ be any $N$ differential operators of the form
376
+
377
+ $$
378
+ \Delta_k = \left(\frac{\partial}{\partial x_1}\right)^{\alpha_1} \cdots \left(\frac{\partial}{\partial x_n}\right)^{\alpha_n}, \text{ where } \alpha_1 + \dots + \alpha_n \leq k.
379
+ $$
380
+
381
+ Let $f_1, \ldots, f_N \in \mathcal{P}(\mathbb{R}^n)$ . The generalized Wronskian of $(f_1, \ldots, f_N)$ associated to $\Delta_0, \ldots, \Delta_{N-1}$ is defined as the determinant of the matrix $M = [\Delta_{i-1} f_j(x)]_{1 \leq i,j \leq N}$ . In general, $(f_1, \ldots, f_N)$ has multiple generalized Wronskians, corresponding to multiple choices for $\Delta_0, \ldots, \Delta_{N-1}$ .
382
+
383
+ # A.1 PROOF OF THEOREM 5.6
384
+
385
+ For brevity, let $N = \binom{n+d}{d}$ and let $\mathbf{x} = (x_1, \ldots, x_n)$ . Recall that $\lambda_1 < \cdots < \lambda_N$ are all the $n$ -tuples in $\Lambda_{\leq d}^n$ in the colexicographic order. For each $1 \leq i, k \leq N$ , write $\lambda_k = (\lambda_{k,1}, \dots, \lambda_{k,n})$ , define the differential operator $\Delta_{\lambda_k} = \left(\frac{\partial}{\partial x_1}\right)^{\lambda_{k,1}} \cdots \left(\frac{\partial}{\partial x_n}\right)^{\lambda_{k,n}}$ , and let $\alpha_{\lambda_k}^{(i)}$ be the coefficient of the monomial $q_k(\mathbf{x})$ in $\Delta_{\lambda_i} p(\mathbf{x})$ . Consider an arbitrary $W \in \mathcal{U}$ , and for each $1 \leq j \leq N$ , define $f_j \in \mathcal{P}_{\leq d}(\mathbb{R}^n)$ by the map $\mathbf{x} \mapsto p(w_{1,j}^{(1)}x_1, \ldots, w_{n,j}^{(1)}x_n)$ . Note that $\mathcal{F}_{p,\mathbf{0}_n}(W) = (f_1, \ldots, f_N)$ by definition. Next, define the matrix $M_W(\mathbf{x}) := [\Delta_if_j(x)]_{1 \leq i,j \leq N}$ , and note that $\det M_W(\mathbf{x})$ is the generalized Wronskian of $(f_1, \ldots, f_N)$ associated to $\Delta_1, \ldots, \Delta_N$ . In particular, this generalized Wronskian is well-defined, since the definition of the colexicographic order implies that $\lambda_{k,1} + \cdots + \lambda_{k,n} \leq k$ for all possible $k$ . Similar to the univariate case, $(f_1, \ldots, f_N)$ is linearly independent if (and only if) its generalized Wronskian is not the zero function (Wolsson, 1989). Thus, to show that $W \in {}^p\mathcal{U}^{\mathrm{ind}}$ , it suffices to show that the evaluation $\det M_W(\mathbf{1}_n)$ of this generalized Wronskian at $\mathbf{x} = \mathbf{1}_n$ gives a non-zero value, where $\mathbf{1}_n$ denotes the all-ones vector in $\mathbb{R}^n$ .
386
+
387
+ Observe that the $(i,j)$ -th entry of $M_W(\mathbf{1}_n)$ equals $(\widehat{\mathbf{w}}_j^{(1)})^{\lambda_i}(\Delta_{\lambda_i}p)(\widehat{\mathbf{w}}_j^{(1)})$ , hence we can check that $M_W(\mathbf{1}_n) = M'M''$ , where $M'$ is an $N$ -by- $N$ matrix whose $(i,j)$ -th entry is given by
388
+
389
+ $$
390
+ M _ {i, j} ^ {\prime} = \left\{ \begin{array}{l l} \alpha_ {\lambda_ {j} - \lambda_ {i}} ^ {(i)}, & \text{if } \lambda_ {j} - \lambda_ {i} \in \Lambda_ {\leq d} ^ {n}; \\ 0, & \text{if } \lambda_ {j} - \lambda_ {i} \not \in \Lambda_ {\leq d} ^ {n}; \end{array} \right.
391
+ $$
392
+
393
+ and where $M'' = Q[W]$ is the non-bias Vandermonde matrix of $W$ .
394
+
395
+ It follows from the definition of the colexicographic order that $\lambda_{j} - \lambda_{i}$ necessarily contains at least one strictly negative entry whenever $j < i$ , hence we infer that $M'$ is upper triangular. The diagonal entries of $M'$ are $\alpha_{\mathbf{0}_n}^{(1)}, \alpha_{\mathbf{0}_n}^{(2)}, \ldots, \alpha_{\mathbf{0}_n}^{(N)}$ , and note that $\alpha_{\mathbf{0}_n}^{(i)} = (\lambda_{i,1}!\cdots\lambda_{i,n}!)\alpha_{\lambda_i}^{(1)}$ for each $1 \leq i \leq N$ , where $\lambda_{i,1}!\cdots\lambda_{i,n}!$ denotes the product of the factorials of the entries of the $n$ -tuple $\lambda_i$ . In particular, $\lambda_{i,1}!\cdots\lambda_{i,n}! \neq 0$ , and $\alpha_{\lambda_i}^{(1)}$ , which is the coefficient of the monomial $q_i(\mathbf{x})$ in $p(\mathbf{x})$ , is non-zero. Thus, $\operatorname*{det}(M') \neq 0$ .
396
+
397
+ We have come to the crucial step of our proof. If we can show that $\operatorname{det}(M'') = \operatorname{det}(Q[W]) \neq 0$ , then $\operatorname{det}(M_W(\mathbf{1}_n)) = \operatorname{det}(M') \operatorname{det}(M'') \neq 0$ , and hence we can infer that $W \in {}^p\mathcal{U}^{\mathrm{ind}}$ . This means that ${}^p\mathcal{U}^{\mathrm{ind}}$ contains the subset $\mathcal{U}' \subseteq \mathcal{U}$ consisting of all $W$ such that $Q[W]$ is non-singular. Note that $\operatorname{det}(Q[W])$ is a polynomial in terms of the non-bias weights in $W^{(1)}$ as its variables, so we could write this polynomial as $r = r(W)$ . Consequently, if we can find a single $W \in \mathcal{U}$ such that $Q[W]$ is non-singular, then $r(W)$ is not identically zero on $\mathcal{U}$ , which then implies that $\mathcal{U}' = \{W \in \mathcal{U} : r(W) \neq 0\}$ is dense in $\mathcal{U}$ (w.r.t. the Euclidean metric).
398
+
399
+ # A.2 PROOF OF COROLLARY 5.7
400
+
401
+ Let $N \coloneqq \binom{n+d}{d}$ . By Theorem 5.6, it suffices to show that there exists some $W \in \mathcal{W}_N^{n,m}$ such that the non-bias Vandermonde matrix of $W$ is non-singular. Consider $W \in \mathcal{W}_N^{n,m}$ such that $w_{i,j}^{(1)} = (w_{1,j}^{(1)})^{(d+1)^{i-1}}$ . Recall that the monomials in $\mathcal{M}_{\leq d}^n$ are arranged in colexicographic order, i.e.
402
+
403
+ $$
404
+ 1, x _ {1}, x _ {1} ^ {2}, \ldots , x _ {1} ^ {d}, x _ {2}, x _ {1} x _ {2}, x _ {1} ^ {2} x _ {2}, \ldots , x _ {2} ^ {2}, x _ {1} x _ {2} ^ {2}, \ldots , x _ {n} ^ {d}.
405
+ $$
406
+
407
+ Thus, there are fixed integers $0 = \beta_{1} < \beta_{2} < \dots < \beta_{N}$ , such that the $(i,j)$ -th entry of $Q[W]$ is $(w_{1,j}^{(1)})^{\beta_i}$ . Such matrices are well-studied in algebraic combinatorics, and the determinant of $Q[W]$ is a Schur polynomial; see (Stanley, 1999). In particular, if we choose positive pairwise distinct values for $w_{1,j}^{(1)}$ (for $1 \leq j \leq N$ ), then $Q[W]$ is non-singular, since a Schur polynomial can be expressed as a (non-negative) sum of certain monomials; see (Stanley, 1999, Sec. 7.10) for details.
408
+
409
+ # B AN ANALOG OF KADEC'S THEOREM AND THE PROOF OF LEMMA 5.9
410
+
411
+ Throughout this section, suppose $\sigma \in \mathcal{C}(\mathbb{R})$ and let $d\geq 1$ be an integer. We shall use the same definitions for $\{\lambda_k\}_{k\in \mathbb{N}},\{Y_k\}_{k\in \mathbb{N}}$ and $\{\sigma_k\}_{k\in \mathbb{N}}$ as given immediately after Remark 5.8. Our goal for this section is to prove Theorem B.1 below, so that we can infer Lemma 5.9 as a consequence of Theorem B.1. Note that Theorem B.1 is an analog of the well-known Kadec's theorem (Kadec, 1960) from approximation theory. To prove Theorem B.1, we shall essentially follow the proof of Kadec's theorem as given in (Kadec, 1963).
412
+
413
+ We begin with a crucial observation. For every best polynomial approximant $\sigma_{k}$ to $\sigma|_{Y_k}$ of degree $d$ , it is known that there are (at least) $d + 2$ values
414
+
415
+ $$
416
+ y _ {k} ^ {\prime} \leq a _ {0} ^ {(k)} < a _ {1} ^ {(k)} < \dots < a _ {d + 1} ^ {(k)} \leq y _ {k} ^ {\prime \prime},
417
+ $$
418
+
419
+ and some sign $\delta_{k}\in \{\pm 1\}$ , such that $\sigma (a_i^{(k)}) - \sigma_k(a_i^{(k)}) = (-1)^i\delta_kE_d(\sigma |_{Y_k})$ for all $0\leq i\leq d + 1$ ; see (Rivlin, 1981, Thm. 1.7). Define
420
+
421
+ $$
422
+ \Delta_ {k} := \max \left\{\left| \frac {a _ {i} ^ {(k)} - y _ {k} ^ {\prime}}{y _ {k} ^ {\prime \prime} - y _ {k} ^ {\prime}} - \frac {i}{d + 1} \right|: 0 \leq i \leq d + 1 \right\}.
423
+ $$
424
+
425
+ Theorem B.1. If $\lim_{k\to \infty}E_d(\sigma |_{Y_k}) = \infty$ , then for any $\gamma >0$ , we have $\liminf_{k\to \infty}\frac{\Delta_k\lambda_k}{k^\gamma} = 0$ .
426
+
427
+ Proof. For every $k \in \mathbb{N}$ , define the functions $e_k \coloneqq \sigma - \sigma_k$ and $\phi_{k+1} \coloneqq \sigma_k - \sigma_{k+1} = e_{k+1} - e_k$ . Note that $e_k \in \mathcal{C}(\mathbb{R})$ and $\phi_{k+1} \in \mathcal{P}_{\leq d}(\mathbb{R})$ . Since $y_{k+1}' \leq a_i^{(k)} \leq y_{k+1}''$ by assumption, it follows from the definition of $\sigma_{k+1}$ that $-E_d(\sigma|_{Y_{k+1}}) \leq e_{k+1}(a_i^{(k)}) \leq E_d(\sigma|_{Y_{k+1}})$ . By the definition of $a_i^{(k)}$ , we have $e_k(a_i^{(k)}) = (-1)^i \delta_k E_d(\sigma|_{Y_k})$ . Consequently,
428
+
429
+ $$
430
+ E _ {d} (\sigma | _ {Y _ {k}}) - E _ {d} (\sigma | _ {Y _ {k + 1}}) \leq (- 1) ^ {i} \delta_ {k} (e _ {k} - e _ {k + 1}) (a _ {i} ^ {(k)}) \leq E _ {d} (\sigma | _ {Y _ {k}}) + E _ {d} (\sigma | _ {Y _ {k + 1}}),
431
+ $$
432
+
433
+ or equivalently, $-E_{d}(\sigma |_{Y_{k}}) - E_{d}(\sigma |_{Y_{k + 1}})\leq (-1)^{i}\delta_{k}\phi_{k + 1}(a_{i}^{(k)})\leq E_{d}(\sigma |_{Y_{k + 1}}) - E_{d}(\sigma |_{Y_{k}}).$
434
+
435
+ Since $Y_{k} \subseteq Y_{k + 1}$ implies $E_{d}(\sigma|_{Y_{k}}) \leq E_{d}(\sigma|_{Y_{k + 1}})$ , it follows that $a_{2i - 1} \leq a_{i}^{(k)} \leq a_{2i}$ (for each $0 \leq i \leq d + 1$ ), where $a_{2i - 1}$ and $a_{2i}$ are the roots of the equation $|\phi_{k + 1}(y)| = E_{d}(\sigma|_{Y_{k + 1}}) - E_{d}(\sigma|_{Y_{k}})$ .
436
+
437
+ If $E_{d}(\sigma|_{Y_{k+1}}) = E_{d}(\sigma|_{Y_{k}})$ , then $\sigma_{k+1} = \sigma_{k}$ by definition, so we could set $a_{i}^{(k+1)} = a_{i}^{(k)}$ for all $i$ , i.e. there is nothing to prove in this case. Henceforth, assume $E_{d}(\sigma|_{Y_{k+1}}) \neq E_{d}(\sigma|_{Y_{k}})$ , and consider the polynomial function
438
+
439
+ $$
440
+ \phi (y) := \frac {\phi_ {k + 1} \left(y - y _ {k} ^ {\prime}\right)}{E _ {d} \left(\sigma \mid_ {Y _ {k + 1}}\right) - E _ {d} \left(\sigma \mid_ {Y _ {k}}\right)}.
441
+ $$
442
+
443
+ It then follows from (Kadec, 1963, Lem. 2) that
444
+
445
+ $$
446
+ \Delta_ {k} \leq \frac {\theta}{d + 1} + \frac {1}{\lambda_ {k} \sqrt {(d + 1) \theta}} \operatorname{arcosh} \frac {E _ {d} \left(\sigma \mid_ {Y _ {k + 1}}\right) + E _ {d} \left(\sigma \mid_ {Y _ {k}}\right)}{E _ {d} \left(\sigma \mid_ {Y _ {k + 1}}\right) - E _ {d} \left(\sigma \mid_ {Y _ {k}}\right)}, \tag {9}
447
+ $$
448
+
449
+ where $\theta$ is an arbitrary real number satisfying $0 < \theta < \frac{1}{2}$ .
450
+
451
+ Since $\lim_{k\to \infty}E_d(\sigma |_{Y_k}) = \infty$ by assumption, the infinite product $\prod_{k = 0}^{\infty}\frac{E_d(\sigma|_{Y_{k + 1}})}{E_d(\sigma|_{Y_k})}$ diverges, and thus the series $\sum_{k = 0}^{\infty}\frac{E_d(\sigma|_{Y_{k + 1}}) - E_d(\sigma|_{Y_k})}{E_d(\sigma|_{Y_{k + 1}}) + E_d(\sigma|_{Y_k})}$ also diverges. It then follows from (9) that
452
+
453
+ $$
454
+ \sum_ {k = 0} ^ {\infty} \frac {1}{\cosh \left[ \left(\Delta_ {k} - \frac {\theta}{d + 1}\right) \lambda_ {k} \sqrt {(d + 1) \theta} \right]} = \infty ,
455
+ $$
456
+
457
+ hence $\sum_{k=0}^{\infty} \frac{1}{(\Delta_k \lambda_k)^D} = \infty$ for any $D > 1$ . If we compare the divergent series $\sum_{k=0}^{\infty} \frac{1}{(\Delta_k \lambda_k)^D}$ with the convergent series $\sum_{k=0}^{\infty} \frac{1}{k^{1+\tau}}$ (for any $\tau > 0$ ), we thus get
458
+
459
+ $$
460
+ \liminf _ {k \to \infty} \frac {\Delta_ {k} \lambda_ {k}}{k ^ {(1 + \tau) / D}} = 0.
461
+ $$
462
+
463
+ Therefore, the assertion follows by letting $\gamma = \frac{1 + \tau}{D}$ .
464
+
465
+ ![](images/63f5bb710d6d343a7d28106e8d503df918dadd9c25e3ae7e60eeecc227b1487d.jpg)
466
+
467
+ Proof of Lemma 5.9. Fix $\varepsilon >0$ . By Theorem B.1, we have $\liminf_{k\to \infty}\frac{\Delta_k\lambda_k}{k^\gamma} = 0$ for any $\gamma >0$ . Thus, by the definition of lim inf, there exists a subsequence $\{k_t^{\prime}\}_{t\in \mathbb{N}}$ of $\mathbb{N}$ such that
468
+
469
+ $$
470
+ \left| \frac {\Delta_ {k _ {t} ^ {\prime}} \lambda_ {k _ {t} ^ {\prime}}}{(k _ {t} ^ {\prime}) ^ {\gamma}} \right| < \varepsilon
471
+ $$
472
+
473
+ for all $t \in \mathbb{N}$ (given any $\gamma > 0$ ). Since $\lambda_{k}$ is at least $\Omega(k^{\gamma})$ for some $\gamma > 0$ , we can use this particular $\gamma$ to get that $\lim_{t \to \infty} \frac{\lambda_{k_t'}}{(k_t')^\gamma} > 0$ . Consequently, there is a subsequence $\{k_t\}_{t \in \mathbb{N}}$ of $\{k_t'\}_{t \in \mathbb{N}}$ such that $|\Delta_{k_t}| < \varepsilon$ for all $t \in \mathbb{N}$ . Since $d \geq 2$ by assumption, it then follows that
474
+
475
+ $$
476
+ \frac {1}{d + 1} - \varepsilon < \frac {a _ {1} ^ {\left(k _ {t}\right)} - y _ {k _ {t}} ^ {\prime}}{\lambda_ {k _ {t}}} < \frac {a _ {2} ^ {\left(k _ {t}\right)} - y _ {k _ {t}} ^ {\prime}}{\lambda_ {k _ {t}}} < \frac {d}{d + 1} + \varepsilon . \tag {10}
477
+ $$
478
+
479
+ Now $\sigma -\sigma_{k_t}$ is continuous, so by the definition of $a_i^{(k_t)}$ , there is some $a_1^{(k_t)} < y_{k_t} < a_2^{(k_t)}$ such that $\sigma (y_{k_t}) = \sigma_{k_t}(y_{k_t})$ . From (10), we thus infer that $\frac{\min\{|y_{k_t} - y_{k_t}'|,|y_{k_t} - y_{k_t}''|\}}{\lambda_{k_t}} >\frac{1}{d + 1} -\varepsilon$ as desired.
480
+
481
+ # C PROOFS OF REMAINING LEMMAS
482
+
483
+ # C.1 PROOF OF LEMMA 5.10
484
+
485
+ Theorem 2.2 gives $\| \sigma_k - \sigma \|_{\infty, Y_k} = E_d(\sigma|_{Y_k}) \leq 6\omega_{\sigma|_{Y_k}}(\frac{\lambda_k}{2d})$ . Recall that any modulus of continuity $\omega_f$ is subadditive (i.e. $\omega_f(x + y) \leq \omega_f(x) + \omega_f(y)$ for all $x, y$ ); see (Rivlin, 1981, Chap. 1). Thus for fixed $d$ , we have $\omega_{\sigma|_{Y_k}}(\frac{\lambda_k}{2d}) \in \mathcal{O}(\lambda_k)$ , which implies $(k \mapsto \| \sigma_k - \sigma \|_{\infty, Y_k}) \in o(\lambda_k^{1 + \gamma})$ .
486
+
487
+ # C.2 PROOF OF LEMMA 5.11
488
+
489
+ Our proof of Lemma 5.11 is a straightforward application of both the Cayley-Menger determinant formula and the Leibniz determinant formula. For each $0 \leq i \leq N$ , let $\widehat{S}_i(\lambda) := S(\lambda) \setminus \{p_i(\lambda)\}$ , and let $\widehat{\Delta}_i(\lambda)$ be the convex hull of $\widehat{S}_i(\lambda)$ . Let $\mathcal{V}(\Delta(\lambda))$ (resp. $\mathcal{V}(\widehat{\Delta}_i(\lambda))$ ) denote the $N$ -dimensional (resp. $(N-1)$ -dimensional) volume of $\Delta(\lambda)$ (resp. $\widehat{\Delta}_i(\lambda)$ ). Define the $(N+2)$ -by- $(N+2)$ matrix $M(\lambda) = [M_{i,j}(\lambda)]_{0 \leq i,j \leq N+1}$ as follows: $M_{i,j}(\lambda) = \| p_i(\lambda) - p_j(\lambda) \|_2^2$ for all $0 \leq i, j \leq N$ ; $M_{N+1,i}(\lambda) = M_{i,N+1}(\lambda) = 1$ for all $0 \leq i \leq N$ ; and $M_{N+1,N+1}(\lambda) = 0$ .
490
+
491
+ The Cayley-Menger determinant formula gives $[\mathcal{V}(\Delta (\lambda))]^2 = \frac{(-1)^{N + 1}}{(N!)^22^N}\operatorname *{det}(M(\lambda))$ . Analogously, if we let $M^{\prime}(\lambda)$ be the square submatrix of $M(\lambda)$ obtained by deleting the first row and column from
492
+
493
+ $M(\lambda)$ , then $[\mathcal{V}(\widehat{\Delta}_0(\lambda))]^2 = \frac{(-1)^N}{((N - 1)!)^22^{N - 1}}\operatorname*{det}(M'(\lambda))$ . Now, $\mathcal{V}(\Delta (\lambda)) = \frac{1}{N}\mathcal{V}(\widehat{\Delta}_0(\lambda))h_0(\lambda)$ , so
494
+
495
+ $$
496
+ \left[ h _ {0} (\lambda) \right] ^ {2} = \frac {- 1}{2 N} \frac {\det (M (\lambda))}{\det M ^ {\prime} (\lambda)}. \tag {11}
497
+ $$
498
+
499
+ Without loss of generality, assume that $r_0 \geq r_1 \geq \ldots$ . Also, for any integer $k \geq 0$ , let $\mathfrak{S}_k$ be the set of all permutations on $\{0, \ldots, k\}$ , and let $\mathfrak{S}_k'$ be the subset of $\mathfrak{S}_k$ consisting of all permutations that are not derangements. (Recall that $\tau \in \mathfrak{S}_k$ is called a derangement if $\tau(i) \neq i$ for all $0 \leq i \leq k$ .) The diagonal entries of $M(\lambda)$ are all zeros, so by the Leibniz determinant formula, we get
500
+
501
+ $$
502
+ \det (M(\lambda)) = \sum_{\tau \in \mathfrak{S}^{\prime}_{N + 1}}\operatorname {sgn}(\tau)\prod_{0\leq i\leq N + 1}M_{i,\tau (i)}(\lambda),
503
+ $$
504
+
505
+ where $\operatorname{sgn}(\tau)$ denotes the sign of the permutation $\tau$ . Note that $M_{i,j}(\lambda) \in \Theta(\lambda^{2\max\{r_i, r_j\}})$ for all $0 \leq i, j \leq N$ satisfying $i \neq j$ . (Here, $\Theta$ refers to $\Theta$ -complexity.) Consequently, using the fact that $M_{i,N+1}(\lambda) = M_{N+1,i}(\lambda) = 1$ for all $0 \leq i \leq N$ , we get that $\operatorname{det}(M(\lambda)) \in \Theta(\lambda^{2R_N})$ , where
506
+
507
+ $$
508
+ R _ {N} = \left\{ \begin{array}{l l} 2 r _ {0} + \dots + 2 r _ {(N - 2) / 2} = 2 \sum_ {t = 0} ^ {(N - 2) / 2} r _ {t}, & \text{if } N \text{ is even;} \\ 2 r _ {0} + \dots + 2 r _ {(N - 3) / 2} + r _ {(N - 1) / 2} = - r _ {(N - 1) / 2} + 2 \sum_ {t = 0} ^ {(N - 1) / 2} r _ {t}, & \text{if } N \text{ is odd.} \end{array} \right.
509
+ $$
510
+
511
+ The even case corresponds to the derangement $\tau \in \mathfrak{S}_{N + 1}$ given by $\tau (i) = N - i$ for $0\leq i\leq \frac{N - 2}{2}$ , $\tau (\frac{N}{2}) = N + 1$ , $\tau (N + 1) = \frac{N}{2}$ ; while the odd case corresponds to the derangement $\tau \in \mathfrak{S}_{N + 1}$ given by $\tau (i) = N - i$ for $0\leq i\leq \frac{N - 3}{2}$ , $\tau (\frac{N - 1}{2}) = \frac{N + 1}{2}$ , $\tau (\frac{N + 1}{2}) = N + 1$ , $\tau (N + 1) = \frac{N - 1}{2}$ . A formula for $\operatorname*{det}(M'(\lambda))$ can be analogously computed. Consequently, it follows from (11) that $[h_0(\lambda)]^2\in \Theta \bigl (\lambda^{2[2r_0 - r_{\lfloor N / 2\rfloor}]}\bigr)$ . Now, $r_0\geq r_{\lfloor N / 2\rfloor}$ by assumption, and $r_0$ (being the largest) must satisfy $r_0 > r_{\min}$ , thus $h_0(\lambda)\in \Omega (\lambda^{r_0})$ , and the assertion follows by taking $\gamma = r_0 - r_{\min}$ .
512
+
513
+ # C.3 PROOF OF LEMMA 5.12
514
+
515
+ Consider any open neighborhood $U$ of $\mathbf{0}_M$ . Since $\varphi$ is open and $\varphi(\mathbf{0}_M) = \mathbf{0}_N$ , the image $\varphi(U)$ must contain an open neighborhood of $\mathbf{0}_N$ . Thus for any $\varepsilon > 0$ , we can always choose $N + 1$ points $w_0, \ldots, w_N$ in $B_{\varepsilon}^M \setminus \{\mathbf{0}_M\}$ , such that the convex hull of $\{\varphi(w_0), \ldots, \varphi(w_N)\}$ contains the point $\mathbf{0}_N$ . Since $\varphi(\lambda x) \geq \lambda \varphi(x)$ for all $x \in \mathbb{R}^M$ , $\lambda > 0$ , and since $\varphi$ is continuous, it then follows from definition that for every $k \in \mathbb{N}$ , we can choose $N + 1$ points $u_0^{(k)}, \ldots, u_N^{(k)}$ in $U_k$ , such that the convex hull of $U_k' := \{\varphi(u_0^{(k)}), \ldots, \varphi(u_N^{(k)})\}$ contains $\mathbf{0}_N$ . Define $r_k := \sup \{r > 0 : B_r^N \subseteq \varphi(B_{\lambda_k}^m)\}$ for each $k \in \mathbb{N}$ , and note also that $\lim_{k \to \infty} r_k = \infty$ . Thus, given a ball $B_r^N$ of any desired radius, there is some (sufficiently large) $k$ such that the convex hull of $U_k'$ contains $B_r^N$ .
516
+
517
+ Now, since $\theta \lambda_{k} < \| u_{j}^{(k)}\|_{2}\leq \lambda_{k}$ and $\varphi (\lambda u_j^{(k)})\geq \lambda \varphi (u_j^{(k)})$ for all $0\le j\le N,\lambda >0$ , we infer that none of the points $\varphi (u_0^{(k)}),\ldots ,\varphi (u_N^{(k)})$ are contained in the ball $B_{\theta r_k}^N$ . Consequently, as $k\to \infty$ we have $\theta r_k\rightarrow \infty$ , and therefore the barycentric coordinate vector $(b_{0},\dots,b_{N})$ (w.r.t. $U_{k}^{\prime}$ ) of every point in the fixed ball $B_{r}^{N}$ would converge to $\left(\frac{1}{N+1},\dots,\frac{1}{N+1}\right)$ (which is the barycentric coordinate vector of the barycenter w.r.t. $U_{k}^{\prime}$ ); this proves our assertion.
518
+
519
+ # D CONJECTURED OPTIMALITY OF UPPER BOUND $\mathcal{O}(\varepsilon^{-n})$ IN THEOREM 3.2
520
+
521
+ It was conjectured by Mhaskar (1996) that there exists some smooth non-polynomial function $\sigma$ , such that at least $\Omega(\varepsilon^{-n})$ hidden units are required to uniformly approximate every function in the class $\mathfrak{S}$ of $C^1$ functions with bounded Sobolev norm. As evidence that this conjecture is true, a heuristic argument was provided in (Mhaskar, 1996), which uses a result by DeVore et al. (1989); cf. (Pinkus, 1999, Thm. 6.5). To the best of our knowledge, this conjecture remains open. If this conjecture is indeed true, then our upper bound $\mathcal{O}(\varepsilon^{-n})$ in Theorem 3.2 is optimal for general continuous non-polynomial activation functions.
522
+
523
+ For specific activation functions, such as the logistic sigmoid function, or any polynomial spline function of fixed degree with finitely many knots (e.g. the ReLU function), it is known that the minimum number $N$ of hidden units required to uniformly approximate every function in $\mathfrak{S}$ must satisfy $(N\log N)\in \Omega (\varepsilon^{-n})$ (Maiorov & Meir, 2000); cf. (Pinkus, 1999, Thm. 6.7). Hence there is still a gap between the lower and upper bounds for $N$ in these specific cases. It would be interesting to find optimal bounds for these cases.
acloserlookattheapproximationcapabilitiesofneuralnetworks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5ec8202f62ba8b192213721182a95c242b3e890d7eb314f5efc98ac4ba03dcb
3
+ size 280993
acloserlookattheapproximationcapabilitiesofneuralnetworks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07bf4de078c567a3b6345a6f745b020fdd3a88307325ba44b16a13609b3e8470
3
+ size 1400575
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37d7ffec36ca055ebb3678afb6620dab8d735f5ce4d5546386a341435fb8a65d
3
+ size 113235
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be94f543bce0209b892fcb3bcbad31086efafc4d1ac185cae15c75602bf1e82d
3
+ size 135900
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/57d54d98-a9af-48c4-a5a7-d695a362efd9_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d424e644d99e09e678dbb25eb0499d9f312fd089bea01d84f2916dc353c8628b
3
+ size 1839293
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/full.md ADDED
@@ -0,0 +1,529 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A CLOSER LOOK AT THE OPTIMIZATION LANDSCAPES OF GENERATIVE ADVERSARIAL NETWORKS
2
+
3
+ Hugo Berard*
4
+
5
+ Mila, Université de Montréal
6
+
7
+ Facebook AI Research
8
+
9
+ Gauthier Gidel*
10
+
11
+ Mila, Université de Montréal
12
+
13
+ Element AI
14
+
15
+ Amjad Almahairi
16
+
17
+ Element AI
18
+
19
+ Pascal Vincent†
20
+
21
+ Mila, Université de Montréal
22
+
23
+ Facebook AI Research
24
+
25
+ Simon Lacoste-Julien
26
+
27
+ Mila, Université de Montréal
28
+
29
+ Element AI
30
+
31
+ # ABSTRACT
32
+
33
+ Generative adversarial networks have been very successful in generative modeling, however they remain relatively challenging to train compared to standard deep neural networks. In this paper, we propose new visualization techniques for the optimization landscapes of GANs that enable us to study the game vector field resulting from the concatenation of the gradient of both players. Using these visualization techniques we try to bridge the gap between theory and practice by showing empirically that the training of GANs exhibits significant rotations around Local Stable Stationary Points (LSSP), similar to the one predicted by theory on toy examples. Moreover, we provide empirical evidence that GAN training converge to a stable stationary point which is a saddle point for the generator loss, not a minimum, while still achieving excellent performance.<sup>1</sup>
34
+
35
+ # 1 INTRODUCTION
36
+
37
+ Deep neural networks have exhibited remarkable success in many applications (Krizhevsky et al., 2012). This success has motivated many studies of their non-convex loss landscape (Choromanska et al., 2015; Kawaguchi, 2016; Li et al., 2018b), which, in turn, has led to many improvements, such as better initialization and optimization methods (Glorot and Bengio, 2010; Kingma and Ba, 2015).
38
+
39
+ While most of the work on studying non-convex loss landscapes has focused on single objective minimization, some recent class of models require the joint minimization of several objectives, making their optimization landscape intrinsically different. Among these models is the generative adversarial network (GAN) (Goodfellow et al., 2014) which is based on a two-player game formulation and has achieved state-of-the-art performance on some generative modeling tasks such as image generation (Brock et al., 2019).
40
+
41
+ On the theoretical side, many papers studying multi-player games have argued that one main optimization issue that arises in this case is the rotation due to the adversarial component of the game (Mescheder et al., 2018; Balduzzi et al., 2018; Gidel et al., 2019b). This has been extensively studied on toy examples, in particular on the so-called bilinear example (Goodfellow, 2016) (a.k.a Dirac GAN (Mescheder et al., 2018)). However, those toy examples are very far from the standard realistic setting of image generation involving deep networks and challenging datasets. To our knowledge it remains an open question if this rotation phenomenon actually occurs when training GANs in more practical settings.
42
+
43
+ In this paper, we aim at closing this gap between theory and practice. Following Mescheder et al. (2017) and Balduzzi et al. (2018), we argue that instead of studying the loss surface, we should study the game vector field (i.e., the concatenation of each player's gradient), which can provide
44
+
45
+ better insights to the problem. To this end, we propose a new visualization technique that we call Path-angle which helps us observe the nature of the game vector field close to a stationary point for high dimensional models, and carry on an empirical investigation of the properties of the optimization landscape of GANs. The core questions we want to address may be summarized as the following:
46
+
47
+ Is rotation a phenomenon that occurs when training GANs on real world datasets, and do existing training methods find local Nash equilibria?
48
+
49
+ To answer this question we conducted extensive experiments by training different GAN formulations (NSGAN and WGAN-GP) with different optimizers (Adam and ExtraAdam) on three datasets (MoG, MNIST and CIFAR10). Based on our experiments and using our visualization techniques we observe that the landscape of GANs is fundamentally different from the standard loss surfaces of deep networks. Furthermore, we provide evidence that existing GAN training methods do not converge to a local Nash equilibrium.
50
+
51
+ Contributions More precisely, our contributions are the following: (i) We propose studying empirically the game vector field (as opposed to studying the loss surfaces of each player) to understand training dynamics in GANs using a novel visualization tool, which we call Path-angle and that captures the rotational and attractive behaviors near local stationary points (ref. §4.2). (ii) We observe experimentally on both a mixture of Gaussians, MNIST and CIFAR10 datasets that a variety of GAN formulations have a significant rotational behavior around their locally stable stationary points (ref. §5.1). (iii) We provide empirical evidence that existing training procedures find stable stationary points that are saddle points, not minima, for the loss function of the generator (ref. § 5.2).
52
+
53
+ # 2 RELATED WORK
54
+
55
+ Improving the training of GANs has been an active research area in the past few years. Most efforts in stabilizing GAN training have focused on formulating new objectives (Arjovsky et al., 2017), or adding regularization terms (Gulrajani et al., 2017; Mescheder et al., 2017; 2018). In this work, we try to characterize the difference in the landscapes induced by different GAN formulations and how it relates to improving the training of GANs.
56
+
57
+ Recently, Nagarajan and Kolter (2017); Mescheder et al. (2018) show that a local analysis of the eigenvalues of the Jacobian of the game can provide guarantees on local stability properties. However, their theoretical analysis is based on some unrealistic assumptions such as the generator's ability to fully capture the real distribution. In this work, we assess experimentally to what extent these theoretical stability results apply in practice.
58
+
59
+ Rotations in differentiable games has been mentioned and interpreted by (Mescheder et al., 2018; Balduzzi et al., 2018) and Gidel et al. (2019b). While these papers address rotations in games from a theoretical perspective, it was never shown that GANs, which are games with highly non-convex losses, suffered from these rotations in practice. To our knowledge, trying to quantify that GANs actually suffer from this rotational component in practice for real world dataset is novel.
60
+
61
+ The stable points of the gradient dynamics in general games have been studied independently by Mazumdar and Ratliff (2018) and Adolphs et al. (2018). They notice that the locally stable stationary point of some games are not local Nash equilibria. In order to reach a local Nash equilibrium, Adolphs et al. (2018); Mazumdar et al. (2019) develop techniques based on second order information. In this work, we argue that reaching local Nash equilibria may not be as important as one may expect and that we do achieve good performance at a locally stable stationary point.
62
+
63
+ Several works have studied the loss landscape of deep neural networks. Goodfellow et al. (2015) proposed to look at the linear path between two points in parameter space and show that neural networks behave similarly to a convex loss function along this path. Draxler et al. (2018) proposed an extension where they look at nonlinear paths between two points and show that local minima are connected in deep neural networks. Another extension was proposed by (Li et al., 2018a) where they use contour plots to look at the 2D loss surface defined by two directions chosen appropriately. In this paper, we use a similar approach of following the linear path between two points to gain insight about GAN optimization landscapes. However, in this context, looking at the loss of both players along that path may be uninformative. We propose instead to look, along a linear path from initialization to best solution, at the game vector field, particularly at its angle w.r.t. the linear path, the Path-angle.
64
+
65
+ Another way to gain insight into the landscape of deep neural networks is by looking at the Hessian of the loss; this was done in the context of single objective minimization by (Dauphin et al., 2014; Sagun et al., 2016; 2017; Alain et al., 2019). Compared to linear path visualizations which can give global information (but only along one direction), the Hessian provides information about the loss landscape in several directions but only locally. The full Hessian is expensive to compute and one often has to resort to approximations such as computing only the top-k eigenvalues. While, the Hessian is symmetric and thus has real eigenvalues, the Jacobian of a game vector field is significantly different since it is in general not symmetric, which means that the eigenvalues belong to the complex plane. In the context of GANs, Mescheder et al. (2017) introduced a gradient penalty and use the eigenvalues of the Jacobian of the game vector field to show its benefits in terms of stability. In our work, we compute these eigenvalues to assess that, on different GAN formulations and datasets, existing training procedures find a locally stable stationary point that is a saddle point for the loss function of the generator.
66
+
67
+ # 3 FORMULATIONS FOR GAN OPTIMIZATION AND THEIR PRACTICAL IMPLICATIONS
68
+
69
+ # 3.1 THE STANDARD GAME THEORY FORMULATION
70
+
71
+ From a game theory point of view, GAN training may be seen as a game between two players: the discriminator $D_{\varphi}$ and the generator $G_{\theta}$ , each of which is trying to minimize its loss $\mathcal{L}_D$ and $\mathcal{L}_G$ , respectively. Using the same formulation as Mescheder et al. (2017), the GAN objective takes the following form (for simplicity of presentation, we focus on the unconstrained formulation):
72
+
73
+ $$
74
+ \boldsymbol {\theta} ^ {*} \in \underset {\boldsymbol {\theta} \in \mathbb {R} ^ {p}} {\arg \min } \mathcal {L} _ {G} (\boldsymbol {\theta}, \boldsymbol {\varphi} ^ {*}) \quad \text {a n d} \quad \boldsymbol {\varphi} ^ {*} \in \underset {\boldsymbol {\varphi} \in \mathbb {R} ^ {d}} {\arg \min } \mathcal {L} _ {D} (\boldsymbol {\theta} ^ {*}, \boldsymbol {\varphi}). \tag {1}
75
+ $$
76
+
77
+ The solution $(\theta^{*},\varphi^{*})$ is called a Nash equilibrium (NE). In practice, the considered objectives are non-convex and we typically cannot expect better than a local Nash equilibrium (LNE), i.e. a point at which (1) is only locally true (see e.g. (Adolphs et al., 2018) for a formal definition). Ratliff et al. (2016) derived some derivative-based necessary and sufficient conditions for being a LNE. They show that, for being a local NE it is sufficient to be a differential Nash equilibrium:
78
+
79
+ Definition 1 (Differential NE). A point $(\theta^{*},\varphi^{*})$ is a differential Nash equilibrium (DNE) iff
80
+
81
+ $$
82
+ \left\| \nabla_ {\boldsymbol {\theta}} \mathcal {L} _ {G} \left(\boldsymbol {\theta} ^ {*}, \varphi^ {*}\right) \right\| = \left\| \nabla_ {\boldsymbol {\varphi}} \mathcal {L} _ {D} \left(\boldsymbol {\theta} ^ {*}, \varphi^ {*}\right) \right\| = 0, \quad \nabla_ {\boldsymbol {\theta}} ^ {2} \mathcal {L} _ {G} \left(\boldsymbol {\theta} ^ {*}, \varphi^ {*}\right) \succ 0 a n d \nabla_ {\boldsymbol {\varphi}} ^ {2} \mathcal {L} _ {D} \left(\boldsymbol {\theta} ^ {*}, \varphi^ {*}\right) \succ 0 \tag {2}
83
+ $$
84
+
85
+ where $S\succ 0$ if and only if $S$ is positive definite.
86
+
87
+ Being a DNE is not necessary for being a LNE because a local Nash equilibrium may have Hessians that are only semi-definite. NE are commonly used in GANs to describe the goal of the learning procedure (Goodfellow et al., 2014): in this definition, $\pmb{\theta}^{*}$ (resp. $\varphi^{*}$ ) is seen as a local minimizer of $\mathcal{L}_G(\cdot ,\varphi^*)$ (resp. $\mathcal{L}_D(\pmb {\theta}^*,\cdot)$ ).
88
+
89
+ Under this view, however, the interaction between the two networks is not taken into account. This is an important aspect of the game stability that is missed in the definition of DNE (and Nash equilibrium in general). We illustrate this point in the following section, where we develop an example of a game for which gradient methods converge to a point which is a saddle point for the generator's loss and thus not a DNE for the game.
90
+
91
+ # 3.2 AN ALTERNATIVE FORMULATION BASED ON THE GAME VECTOR FIELD
92
+
93
+ In practice, GANs are trained using first order methods that compute the gradients of the losses of each player. Following Gidel et al. (2019a), an alternative point of view on optimizing GANs is to jointly consider the players' parameters $\theta$ and $\varphi$ as a joint state $\omega \coloneqq (\theta ,\varphi)$ , and to study the vector field associated with these gradients, which we call the game vector field
94
+
95
+ $$
96
+ \boldsymbol{v}(\boldsymbol{\omega}) := \left[ \begin{array}{ll} \nabla_{\boldsymbol{\theta}} \mathcal{L}_{G}(\boldsymbol{\omega})^{\top} & \nabla_{\boldsymbol{\varphi}} \mathcal{L}_{D}(\boldsymbol{\omega})^{\top} \end{array} \right]^{\top} \quad \text{where} \quad \boldsymbol{\omega} := (\boldsymbol{\theta}, \boldsymbol{\varphi}). \tag{3}
97
+ $$
98
+
99
+ <table><tr><td>Zero-sum game</td><td>Non-zero-sum game</td></tr><tr><td>DNE ⇒ LSSP (Mescheder et al., 2018)</td><td>DNE ⇏ LSSP (Example 2, §A.2)</td></tr><tr><td>LSSP ⇏ DNE (Adolphs et al., 2018)</td><td>LSSP ⇏ DNE (Example 1)</td></tr></table>
100
+
101
+ Table 1: Summary of the implications between being a differential Nash equilibrium (DNE) and being a locally stable stationary point (LSSP): in general, being a DNE is neither necessary nor sufficient for being a LSSP.
102
+
103
+ With this perspective, the notion of DNE is replaced by the notion of locally stable stationary point (LSSP). Verhulst (1989, Theorem 7.1) defines a LSSP $\omega^{*}$ using the eigenvalues of the Jacobian of the game vector field $\nabla v(\omega^{*})$ at that point.
104
+
105
+ Definition 2 (LSSP). A point $\omega^{*}$ is a locally stable stationary point (LSSP) iff
106
+
107
+ $$
108
+ \boldsymbol{v}\left(\boldsymbol{\omega}^{*}\right) = 0 \quad \text{and} \quad \Re(\lambda) > 0, \quad \forall \lambda \in \operatorname{Sp}\left(\nabla \boldsymbol{v}\left(\boldsymbol{\omega}^{*}\right)\right). \tag{4}
109
+ $$
110
+
111
+ where $\Re$ denotes the real part of the eigenvalue $\lambda$ belonging to the spectrum of $\nabla \pmb{v}(\pmb{\omega}^{*})$ .
112
+
113
+ This definition is not easy to interpret but one can intuitively understand a LSSP as a stationary point (a point $\omega^{*}$ where $\pmb{v}(\pmb{\omega}^{*}) = 0$ ) to which all neighbouring points are attracted. We will formalize this intuition of attraction in Proposition 1. In our two-player game setting, the Jacobian of the game vector field around the LSSP has the following block-matrices form:
114
+
115
+ $$
116
+ \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) = \left[ \begin{array}{l l} \nabla_ {\boldsymbol {\theta}} ^ {2} \mathcal {L} _ {G} \left(\boldsymbol {\omega} ^ {*}\right) & \nabla_ {\boldsymbol {\varphi}} \nabla_ {\boldsymbol {\theta}} \mathcal {L} _ {G} \left(\boldsymbol {\omega} ^ {*}\right) \\ \nabla_ {\boldsymbol {\theta}} \nabla_ {\boldsymbol {\varphi}} \mathcal {L} _ {D} \left(\boldsymbol {\omega} ^ {*}\right) & \nabla_ {\boldsymbol {\varphi}} ^ {2} \mathcal {L} _ {D} \left(\boldsymbol {\omega} ^ {*}\right) \end{array} \right] = \left[ \begin{array}{c c} \boldsymbol {S} _ {1} & \boldsymbol {B} \\ \boldsymbol {A} & \boldsymbol {S} _ {2} \end{array} \right]. \tag {5}
117
+ $$
118
+
119
+ When $\boldsymbol{B} = -\boldsymbol{A}^{\top}$ , being a DNE is a sufficient condition for being a LSSP (Mazumdar and Ratliff, 2018). However, some LSSP may not be DNE (Adolphs et al., 2018), meaning that the optimal generator $\theta^{*}$ could be a saddle point of $\mathcal{L}_G(\cdot, \varphi^*)$ , while the optimal joint state $(\theta^{*}, \varphi^{*})$ may be a LSSP of the game. We summarize these properties in Table 1. In order to illustrate the intuition behind this counter-intuitive fact, we study a simple example where the generator is 2D and the discriminator is 1D.
120
+
121
+ Example 1. Let us consider $\mathcal{L}_G$ as a hyperbolic paraboloid (a.k.a., saddle point function) centered in $(1,1)$ where $(1,\varphi)$ is the principal descent direction and $(- \varphi, 1)$ is the principal ascent direction, while $\mathcal{L}_D$ is a simple bilinear objective.
122
+
123
+ $$
124
+ \mathcal {L} _ {G} (\theta_ {1}, \theta_ {2}, \varphi) = (\theta_ {2} - \varphi \theta_ {1} - 1) ^ {2} - \frac {1}{2} (\theta_ {1} + \varphi \theta_ {2} - 1) ^ {2}, \mathcal {L} _ {D} (\theta_ {1}, \theta_ {2}, \varphi) = \varphi (5 \theta_ {1} + 4 \theta_ {2} - 9)
125
+ $$
126
+
127
+ We plot $\mathcal{L}_G$ in Fig. 1b. Note that the discriminator $\varphi$ controls the principal descent direction of $\mathcal{L}_G$ .
128
+
129
+ We show (see § A.2) that $(\theta_1^*, \theta_2^*, \varphi^*) = (1, 1, 0)$ is a locally stable stationary point but is not a DNE: the generator loss at the optimum $(\theta_1, \theta_2) \mapsto \mathcal{L}_G(\theta_1, \theta_2, \varphi^*) = (\theta_2 - 1)^2 - \frac{1}{2}(\theta_1 - 1)^2$ is not at a DNE because it has a clear descent direction, $(1, 0)$ . However, if the generator follows this descent direction, the dynamics will remain stable because the discriminator will update its parameter, rotating the saddle and making $(1, 0)$ an ascent direction. We call this phenomenon dynamic stability: the loss $\mathcal{L}_G(\cdot, \varphi^*)$ is unstable for a fixed $\varphi^*$ but becomes stable when $\varphi$ dynamically interacts with the generator around $\varphi^*$ .
130
+
131
+ A mechanical analogy for this dynamic stability phenomenon is a ball in a rotating saddle—even though the gravity pushes the ball to escape the saddle, a quick enough rotation of the saddle would trap the ball at the center (see (Thompson et al., 2002) for more details). This analogy has been used to explain Paul's trap (Paul, 1990): a counter-intuitive way to trap ions using a dynamic electric field. In Example 1, the parameter $\varphi$ explicitly controls the rotation of the saddle.
132
+
133
+ This example illustrates the fact that the DNE corresponds to a notion of static stability: it is the stability of one player's loss given the other player is fixed. Conversely, LSSP captures a notion of dynamic stability that considers both players jointly.
134
+
135
+ By looking at the game vector field we capture these interactions. Fig. 1b only captures a snapshot of the generator's loss surface for a fixed $\varphi$ and indicates static instability (the generator is at a saddle point of its loss). In Fig. 1a, however, one can see that, starting from any point, we will rotate around the stationary point $(\varphi^{*},\theta_{1}^{*}) = (0,1)$ and eventually converge to it.
136
+
137
+ The visualization of the game vector field reveals an interesting behavior that does not occur in single objective minimization: close to a LSSP, the parameters rotate around it. Understanding this phenomenon is key to grasp the optimization difficulties arising in games. In the next section, we
138
+
139
+ ![](images/e15cf137aebe6547d11b89680c5c93e78e875b6f904b11fe81843f4ad2e330ef.jpg)
140
+ (a) 2D projection of the vector field.
141
+
142
+ ![](images/b1c2d1b6011588e7940003098c999770b2c880a07acd59163a8ed6747d248245.jpg)
143
+ (b) Landscape of the generator loss.
144
+ Figure 1: Visualizations of Example 1. Left: projection of the game vector field on the plane $\theta_{2} = 1$ . Right: Generator loss. The descent direction is $(1, \varphi)$ (in grey). As the generator follows this descent direction, the discriminator changes the value of $\varphi$ , making the saddle rotate, as indicated by the circular black arrow.
145
+
146
+ formally characterize the notion of rotation around a LSSP and in §4 we develop tools to visualize it in high dimensions. Note that gradient methods may converge to saddle points in single objective minimization, but these are not stable stationary points, unlike in our game example.
147
+
148
+ # 3.3 ROTATION AND ATTRACTION AROUND LOCALLY STABLE STATIONARY POINTS IN GAMES
149
+
150
+ In this section, we formalize the notions of rotation and attraction around LSSP in games, which we believe may explain some difficulties in GAN training. The local stability of a LSSP is characterized by the eigenvalues of the Jacobian $\nabla v(\omega^{*})$ because we can linearize $v(\omega)$ around $\omega^{*}$ :
151
+
152
+ $$
153
+ \boldsymbol {v} (\boldsymbol {\omega}) \approx \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) \left(\boldsymbol {\omega} - \boldsymbol {\omega} ^ {*}\right). \tag {6}
154
+ $$
155
+
156
+ If we assume that (6) is an equality, we have the following theorem.
157
+
158
+ Proposition 1. Let us assume that (6) is an equality and that $\nabla \pmb{v}(\pmb{\omega}^{*})$ is diagonalizable, then there exists a basis $\pmb{P}$ such that the coordinates $\tilde{\omega}_{j}(t)\coloneqq [P(\pmb {\omega}(t) - \pmb{\omega}^{*})]_{j}$ where $\pmb {\omega}(t)$ is a solution of (6) have the following behavior: for $\lambda_{j}\in \mathrm{Sp}\nabla \pmb {v}(\pmb{\omega}^{*})$ we have,
159
+
160
+ 1. If $\lambda_{j}\in \mathbb{R}$ , we observe pure attraction: $\tilde{\omega}_j(t) = e^{-\lambda_jt}\tilde{\omega}_j(0)$ .
161
+ 2. If $\Re (\lambda_j) = 0$ , we observe pure rotation: $\begin{bmatrix} \tilde{\omega}_j(t) \\ \tilde{\omega}_{j+1}(t) \end{bmatrix} = \begin{bmatrix} \cos |\lambda_j t| & \sin |\lambda_j t| \\ -\sin |\lambda_j t| & \cos |\lambda_j t| \end{bmatrix} \begin{bmatrix} \tilde{\omega}_j(0) \\ \tilde{\omega}_{j+1}(0) \end{bmatrix}$ .
162
+ 3. Otherwise, we observe both: $\left[ \begin{array}{l}\tilde{\omega}_j(t)\\ \tilde{\omega}_{j + 1}(t) \end{array} \right] = e^{-\operatorname {Re}(\lambda_j)t}\left[ \begin{array}{ll}\cos \operatorname {Im}(\lambda_jt) & \sin \operatorname {Im}(\lambda_jt)\\ -\sin \operatorname {Im}(\lambda_jt) & \cos \operatorname {Im}(\lambda_jt) \end{array} \right]\left[ \begin{array}{l}\tilde{\omega}_j(0)\\ \tilde{\omega}_{j + 1}(0) \end{array} \right].$
163
+
164
+ Note that we re-ordered the eigenvalues such that the complex conjugate eigenvalues form pairs: if $\lambda_{j} \notin \mathbb{R}$ then $\lambda_{j+1} = \bar{\lambda}_{j}$ .
165
+
166
+ Matrices in 2. and 3. are rotation matrices. They induce a rotational behavior illustrated in Fig 1a.
167
+
168
+ This proposition shows that the dynamics of $\omega(t)$ can be decomposed in a particular basis into attractions and rotations over components that do not interact between each other. Rotation does not appear in single objective minimization around a local minimum, because the eigenvalues of the Hessian of the objective are always real. Mescheder et al. (2017) discussed that difficulties in training GANs may be a result of the imaginary part of the eigenvalues of the Jacobian of the game vector field and Gidel et al. (2019b) mentioned that games have a natural oscillatory behavior. This cyclic behavior has been explained in (Balduzzi et al., 2018) by a non-zero Hamiltonian component in the Helmholtz decomposition of the Jacobian of the game vector field. All these explanations are related to the spectral properties of this Jacobian. The goal of Proposition 1 is to provide a formal definition to the notions of rotation and attraction we are dealing with in this paper.
169
+
170
+ In the following section, we introduce a new tool in order to assess the magnitude of the rotation around a LSSP compared to the attraction to this point.
171
+
172
+ # 4 VISUALIZATION FOR THE VECTOR FIELD LANDSCAPE
173
+
174
+ Neural networks are parametrized by a large number of variables and visualizations are only possible using low dimensional plots (1D or 2D). We first present a standard visualization tool for deep neural network loss surfaces that we will exploit in §4.2.
175
+
176
+ # 4.1 STANDARD VISUALIZATIONS FOR THE LOSS SURFACE
177
+
178
+ One way to visualize a neural network's loss landscape is to follow a parametrized path $\omega(\alpha)$ that connects two parameters $\omega, \omega'$ (often one is chosen early in learning and another one is chosen late in learning, close to a solution). A path is a continuous function $\omega(\cdot)$ such that $\omega(0) = \omega$ and $\omega(1) = \omega'$ . Goodfellow et al. (2015) considered a linear path $\omega(\alpha) = \alpha \omega + (1 - \alpha) \omega'$ . More complex paths can be considered to assess whether different minima are connected (Draxler et al., 2018).
179
+
180
+ # 4.2 PROPOSED VISUALIZATION: PATH-ANGLE
181
+
182
+ We propose to study the linear path between parameters early in learning and parameters late in learning. We illustrate the extreme cases for the game vector field along this path in simple examples in Figure 2(a-c): pure attraction occurs when the vector field perfectly points to the optimum (Fig. 2a) and pure rotation when the vector field is orthogonal to the direction to the optimum (Fig. 2b). In practice, we expect the vector field to be in between these two extreme cases (Fig. 2c). In order to determine in which case we are, around a LSSP, in practice, we propose the following tools.
183
+
184
+ **Path-norm.** We first ensure that we are in a neighborhood of a stationary point by computing the norm of the vector field. Note that considering independently the norm of each player may be misleading: even though the gradient of one player may be close to zero, it does not mean that we are at a stationary point since the other player might still be updating its parameters.
185
+
186
+ **Path-angle.** Once we are close to a final point $\omega'$ , i.e., in a neighborhood of a LSSP, we propose to look at the angle between the vector field (3) and the linear path from $\omega$ to $\omega'$ . Specifically, we monitor the cosine of this angle, a quantity we call Path-angle:
187
+
188
+ $$
189
+ c(\alpha) := \frac{\left\langle \boldsymbol{\omega}^{\prime} - \boldsymbol{\omega}, \boldsymbol{v}_{\alpha} \right\rangle}{\|\boldsymbol{\omega}^{\prime} - \boldsymbol{\omega}\| \, \|\boldsymbol{v}_{\alpha}\|} \quad \text{where} \quad \boldsymbol{v}_{\alpha} := \boldsymbol{v}\left(\alpha \boldsymbol{\omega}^{\prime} + (1 - \alpha) \boldsymbol{\omega}\right), \quad \alpha \in [a, b]. \tag{7}
190
+ $$
191
+
192
+ Usually $[a, b] = [0, 1]$ , but since we are interested in the landscape around a LSSP, it might be more informative to also consider further extrapolated points around $\omega'$ with $b > 1$ .
193
+
194
+ Eigenvalues of the Jacobian. Another important tool to gain insights on the behavior close to a LSSP, as discussed in §3.2, is to look at the eigenvalues of $\nabla v(\omega^{*})$ . We propose to compute the top-k eigenvalues of this Jacobian. When all the eigenvalues have positive real parts, we conclude that we have reached a LSSP, and if some eigenvalues have large imaginary parts, then the game has a strong rotational behavior (Prop. 1). Similarly, we can also compute the top-k eigenvalues of the diagonal blocks of the Jacobian, which correspond to the Hessian of each player. These eigenvalues can inform us on whether we have converged to a LSSP that is not a LNE.
195
+
196
+ An important advantage of the Path-angle relative to the computation of the eigenvalues of $\nabla v(\omega^{*})$ is that it only requires computing gradients (and not second order derivatives, which may be prohibitively computationally expensive for deep networks). Also, it provides information along a whole path between two points and thus, more global information than the Jacobian computed at a single point. In the following section, we use the Path-angle to study the archetypal behaviors presented in Prop. 1.
197
+
198
+ # 4.3 ARCHETYPAL BEHAVIORS OF THE PATH-ANGLE AROUND A LSSP
199
+
200
+ Around a LSSP, we have seen in (6) that the behavior of the vector field is mainly dictated by the Jacobian matrix $\nabla \pmb{v}(\pmb{\omega}^{*})$ . This motivates the study of the behavior of the Path-angle $c(\alpha)$ where the Jacobian is a constant matrix:
201
+
202
+ $$
203
+ \boldsymbol{v}(\boldsymbol{\omega}) = \left[ \begin{array}{ll} \boldsymbol{S}_{1} & \boldsymbol{B} \\ \boldsymbol{A} & \boldsymbol{S}_{2} \end{array} \right] (\boldsymbol{\omega} - \boldsymbol{\omega}^{*}) \quad \text{and thus} \quad \nabla\boldsymbol{v}(\boldsymbol{\omega}) = \left[ \begin{array}{ll} \boldsymbol{S}_{1} & \boldsymbol{B} \\ \boldsymbol{A} & \boldsymbol{S}_{2} \end{array} \right] \quad \forall \boldsymbol{\omega}. \tag{8}
204
+ $$
205
+
206
+ ![](images/b28a56a2ea4f1353a97059a741ba09d2948e2d4162d62fa02eb9810130ee016d.jpg)
207
+
208
+ ![](images/d503e50c3ab6dae48b09d0801a440e967f66c3f7cd6131a77847a8ed161a1bf7.jpg)
209
+
210
+ ![](images/9de96669beab44703c7d316c28f1907d35433e6593b996c4eac547fe2bce1587.jpg)
211
+
212
+ ![](images/ed7d6b35b0509f1f78e731decf12803b4fc3b30a5933c7625c7181bc53e3128d.jpg)
213
+ (a) Attraction only
214
+
215
+ ![](images/7a4b44097828ac39a87e1e7359d71d98327fad6c99ed9d7931b6b73557387078.jpg)
216
+ (b) Rotation only
217
+
218
+ ![](images/faecfdf18244956a4322c7e34965dc1f3dc153a4773e4307129247e77c285500.jpg)
219
+ (c) Rotation and attraction
220
+ Figure 2: Above: game vector field (in grey) for different archetypal behaviors. The equilibrium of the game is at $(0,0)$ . Black arrows correspond to the directions of the vector field at different linear interpolations between two points: $\bullet$ and $\star$ . Below: path-angle $c(\alpha)$ for different archetypal behaviors (right y-axis, in blue). The left y-axis, in orange, corresponds to the norm of the gradients. Notice the "bump" in path-angle (close to $\alpha = 1$ ), characteristic of rotational dynamics.
221
+
222
+ Depending on the choice of $S_{1}, S_{2}, A$ and $B$ , we cover the following cases:
223
+
224
+ - $S_{1}, S_{2} \succ 0, A = B = 0$ : eigenvalues are real. Prop. 1 ensures that we only have attraction. Far from $\omega^{*}$ , the gradient points to $\omega^{*}$ (See Fig. 2a) and thus $c(\alpha) = 1$ for $\alpha \ll 1$ and $c(\alpha) = -1$ for $\alpha \gg 1$ . Since $\omega'$ is not exactly $\omega^{*}$ , we observe a quick sign switch of the Path-angle around $\alpha = 1$ . We plotted the average Path-angle over different approximate optima in Fig. 2a (see appendix for details).
225
+ - $S_{1}, S_{2} = 0, A = -B^{\top}$ : eigenvalues are pure imaginary. Prop. 1 ensures that we only have rotations. Far from the optimum the gradient is orthogonal to the direction that points to $\omega^{*}$ (See Fig. 2b). Thus, $c(\alpha)$ vanishes for $\alpha \ll 1$ and $\alpha \gg 1$ . Because $\omega'$ is not exactly $\omega^{*}$ , around $\alpha = 1$ , the gradient is tangent to the circles induced by the rotational dynamics and thus $c(\alpha) = \pm 1$ . That is why in Fig. 2b we observe a bump in $c(\alpha)$ when $\alpha$ is close to 1.
226
+ - General high dimensional LSSP (4). The dynamics display both attraction and rotation. We observe a combination of the sign switch due to the attraction and the bump due to the rotation. The higher the bump, the closer we are to pure rotations. Since we are performing a low dimensional visualization, we actually project the gradient onto our direction of interest. That is why the Path-angle is significantly smaller than 1 in Fig. 2c.
227
+
228
+ # 5 NUMERICAL RESULTS ON GANS
229
+
230
+ Losses. We focus on two common GAN loss formulations: we consider both the original non-saturating GAN (NSGAN) formulation proposed in Goodfellow et al. (2014) and the WGAN-GP objective described in Gulrajani et al. (2017).
231
+
232
+ Datasets. We first propose to train a GAN on a toy task composed of a 1D mixture of 2 Gaussians (MoG) with 10,000 samples. For this task both the generator and discriminator are neural networks with 1 hidden layer and ReLU activations. We also train a GAN on MNIST, where we use the DCGAN architecture (Radford et al., 2016) with spectral normalization (see §C.2 for details). Finally we also look at the optimization landscape of a state of the art ResNet on CIFAR10 (Krizhevsky and Hinton, 2009).
233
+
234
+ **Optimization methods.** For the mixture of Gaussian (MoG) dataset, we used the full-batch extragradient method (Korpelevich, 1976; Gidel et al., 2019a). We also tried to use standard batch gradient descent, but this led to unstable results indicating that gradient descent might indeed be unable to
235
+
236
+ ![](images/bba788aabf039e83b1edd62a5e0c886944bce79262b7066ed6c58ab898508a83.jpg)
237
+ Figure 3: Path-angle for NSGAN (top row) and WGAN-GP (bottom row) trained on the different datasets, see Appendix C.3 for details on how the path-angle is computed. For MoG the ending point is a generator which has learned the distribution. For MNIST and CIFAR10 we indicate the Inception score (IS) at the ending point of the interpolation. Notice the "bump" in path-angle (close to $\alpha = 1.0$ ), characteristic of the rotational dynamics of games, and absent in the minimization problem (d). Details on error bars in §C.3.
238
+
239
+ ![](images/c4d25120a85cb93f1cff1bca542e53e219c87a0ce291e849b11c48361ebc7eb6.jpg)
240
+ Figure 4: Eigenvalues of the Jacobian of the game for NSGAN (top row) and WGAN-GP (bottom row) trained on the different datasets. Large imaginary eigenvalues are characteristic of rotational behavior. Notice that NSGAN and WGAN-GP objectives lead to very different landscapes (see how the eigenvalues of WGAN-GP are shifted to the right of the imaginary axis). This could explain the difference in performance between NSGAN and WGAN-GP.
241
+
242
+ converge to stable stationary points due to the rotations (see §C.4). On MNIST and CIFAR10, we tested both Adam (Kingma and Ba, 2015) and ExtraAdam (Gidel et al., 2019a). The observations made on models trained with both methods are very similar. ExtraAdam gives slightly better performance in terms of inception score (Salimans et al., 2016), and Adam sometimes converges to unstable points; thus we decided to only include the observations on ExtraAdam (for more details on the observations with Adam, see §C.5). As recommended by Heusel et al. (2017), we chose different learning rates for the discriminator and the generator. All the hyper-parameters and precise details about the experiments can be found in §C.1.
243
+
244
+ # 5.1 EVIDENCE OF ROTATION AROUND LOCALLY STABLE STATIONARY POINTS IN GANS
245
+
246
+ We first look, for all the different models and datasets, at the path-angles between a random initialization (initial point) and the set of parameters during training achieving the best performance (end point) (Fig. 3), and at the eigenvalues of the Jacobian of the game vector field for the same end point (Fig. 4). We are mostly interested in the optimization landscape around LSSPs, so we first check whether we are actually close to one. To do so, we look at the gradient norm around the end point, shown by the orange curves in Fig. 3: the norm of the gradient is quite small for all the models, meaning that we are close to a stationary point. We also need to check that this point is stable; to do so, we look at the eigenvalues of the game Jacobian in Fig. 4 — if all the eigenvalues have positive real parts, then the point is also stable. We observe that most of the time the model has reached a LSSP. However, this is not always the case: for example, in Fig. 4d some of the eigenvalues have a negative real part. We still include those results since, although the point is unstable, it gives performance similar to a LSSP.
247
+
248
+ Our first observation is that all the GAN objectives on both datasets have a non zero rotational component. This can be seen by looking at the Path-angle in Fig. 3, where we always observe a bump, and this is also confirmed by the large imaginary part in the eigenvalues of the Jacobian in Fig. 4. The rotational component is clearly visible in Fig. 3d, where we see no sign switch and a clear bump similar to Fig. 2b. On MNIST and CIFAR10, with NSGAN and WGAN-GP (see Fig. 3), we observe a combination of a bump and a sign switch similar to Fig. 2c. Also, Fig. 4 clearly shows the existence of imaginary eigenvalues with large magnitude (Fig. 4c and 4e). We can see that while almost all models exhibit rotations, the distributions of the eigenvalues are very different. In particular, the complex eigenvalues for NSGAN seem to be much more concentrated on the imaginary axis, while WGAN-GP tends to spread the eigenvalues towards the right of the imaginary axis (Fig. 4e). This shows that different GAN objectives can lead to very different landscapes, which has implications in terms of optimization; in particular, it might explain why WGAN-GP performs slightly better than NSGAN.
249
+
250
+ ![](images/99d86c7a800bf537219162f963240ec5c3102e064c4f6d8705d85810397242f0.jpg)
251
+
252
+ ![](images/a03326938ab0eb4ee5aea42ad17552d48fe0e57e8c4676bda470d56986fd877b.jpg)
253
+
254
+ ![](images/7ca0fe105596135e4739b815cad8755a6e53664aa612e90dfcfac7cff13cf793.jpg)
255
+
256
+ ![](images/b2b7ed39c33887570381fb9f47967283284cad365fb044550e6593fadfd57500.jpg)
257
+ (a) MoG
258
+
259
+ ![](images/6a4a07e1e5625960aaf978f0d6ded521ba37b0c51b5f17ed201fc7d74ae90939.jpg)
260
+ (b) MNIST, IS = 8.97
261
+ Figure 5: NSGAN. Top $k$ -Eigenvalues of the Hessian of each player (in terms of magnitude) in descending order. Top Eigenvalues indicate that the Generator does not reach a local minimum but a saddle point (for CIFAR10 actually both the generator and discriminator are at saddle points). Thus the training algorithms converge to LSSPs which are not Nash equilibria.
262
+
263
+ ![](images/fc50c6d333756610dded1a64a5a11d1febb3f22ac6dcc09dea19ac945e91feda.jpg)
264
+ (c) CIFAR10, IS = 7.33
265
+
266
+ # 5.2 THE LOCALLY STABLE STATIONARY POINTS OF GANS ARE NOT LOCAL NASH EQUILIBRIA
267
+
268
+ As mentioned at the beginning of §5.1, the points we are considering are most of the time LSSPs. To check if these points are also local Nash equilibria (LNE) we compute the eigenvalues of the Hessian of each player independently. If all the eigenvalues of each player are positive, it means that we have reached a DNE. Since the computation of the full spectrum of the Hessians is expensive, we restrict ourselves to the top-k eigenvalues with largest magnitude: exhibiting one significant negative eigenvalue is enough to indicate that the point considered is not in the neighborhood of a
269
+
270
+ ![](images/d152f30d9d1e962bb1e29256ba06d5ff6683518ca00e15de27850c0b5cf14a45.jpg)
271
+ Figure 6: WGAN-GP. Top $k$ -Eigenvalues of the Hessian of each player (in terms of magnitude) in descending order. Top Eigenvalues indicate that the Generator does not reach a local minimum but a saddle point. Thus the training algorithms converge to LSSPs which are not Nash equilibria.
272
+
273
+ LNE. Results are shown in Fig. 5 and Fig. 6, from which we make several observations. First, we see that the generator never reaches a local minimum but instead finds a saddle point. This means that the algorithm converges to a LSSP which is not a LNE, while achieving good results with respect to our evaluation metrics. This raises the question whether convergence to a LNE is actually needed or if converging to a LSSP is sufficient to reach a good solution. We also observe a large difference in the eigenvalues of the discriminator when using the WGAN-GP v.s. the NSGAN objective. In particular, we find that the discriminator in NSGAN converges to a solution with very large positive eigenvalues compared to WGAN-GP. This shows that the discriminator in NSGAN converges to a much sharper minimum. This is consistent with the fact that the gradient penalty acts as a regularizer on the discriminator and prevents it from becoming too sharp.
274
+
275
+ # 6 DISCUSSION
276
+
277
+ Across different GAN formulations, standard optimization methods and datasets, we consistently observed that GANs do not converge to local Nash equilibria. Instead the generator often ends up being at a saddle point of the generator loss function. However, in practice, these LSSPs achieve really good generator performance metrics, which leads us to question whether we need a Nash equilibrium to get a generator with good performance in GANs and whether such a DNE with good performance actually exists. Moreover, we have provided evidence that the optimization landscapes of GANs typically have rotational components specific to games. We argue that these rotational components are part of the reason why GANs are challenging to train, in particular that the instabilities observed during training may come from such rotations close to LSSP. It shows that simple low-dimensional examples, such as the Dirac GAN, do capture some of the arising challenges for training large scale GANs, thus motivating the practical use of methods able to handle strong rotational components, such as extragradient (Gidel et al., 2019a), averaging (Yazici et al., 2019), optimism (Daskalakis et al., 2018) or gradient penalty based methods (Mescheder et al., 2017; Gulrajani et al., 2017).
278
+
279
+ # ACKNOWLEDGMENTS.
280
+
281
+ The contribution to this research by Mila, Université de Montréal authors was partially supported by the Canada CIFAR AI Chair Program (held at Mila), the Canada Excellence Research Chair in “Data Science for Realtime Decision-making”, by the NSERC Discovery Grant RGPIN-2017-06936 (held at Université de Montréal), by a Borealis AI fellowship and by a Google Focused Research award. The authors would like to thank Tatjana Chavdarova for fruitful discussions.
282
+
283
+ # REFERENCES
284
+
285
+ L. Adolphs, H. Daneshmand, A. Lucchi, and T. Hofmann. Local saddle point optimization: A curvature exploitation approach. arXiv, 2018.
286
+ G. Alain, N. Le Roux, and P.-A. Manzagol. Negative eigenvalues of the hessian in deep neural networks. arXiv, 2019.
287
+ M. Arjovsky, S. Chintala, and L. Bottou. Wasserstein generative adversarial networks. In ICML, 2017.
288
+ D. Balduzzi, S. Racaniere, J. Martens, J. Foerster, K. Tuyls, and T. Graepel. The mechanics of n-player differentiable games. In ICML, 2018.
289
+ A. Brock, J. Donahue, and K. Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019.
290
+ A. Choromanska, M. Henaff, M. Mathieu, G. B. Arous, and Y. LeCun. The loss surfaces of multilayer networks. In Artificial Intelligence and Statistics, 2015.
291
+ C. Daskalakis, A. Ilyas, V. Syrgkanis, and H. Zeng. Training GANs with optimism. In *ICLR*, 2018.
292
+ Y. N. Dauphin, R. Pascanu, C. Gulcehre, K. Cho, S. Ganguli, and Y. Bengio. Identifying and attacking the saddle point problem in high-dimensional non-convex optimization. In NeurIPS, 2014.
293
+ F. Draxler, K. Veschgini, M. Salmhofer, and F. Hamprecht. Essentially no barriers in neural network energy landscape. In ICML, 2018.
294
+ G. Gidel, H. Berard, P. Vincent, and S. Lacoste-Julien. A variational inequality perspective on generative adversarial nets. *ICLR*, 2019a.
295
+ G. Gidel, R. A. Hemmat, M. Pezeshki, G. Huang, R. Lepriol, S. Lacoste-Julien, and I. Mitliagkas. Negative momentum for improved game dynamics. In AISTATS, 2019b.
296
+ X. Glorot and Y. Bengio. Understanding the difficulty of training deep feedforward neural networks. In AISTATS, 2010.
297
+ I. Goodfellow. Neurips 2016 tutorial: Generative adversarial networks. arXiv:1701.00160, 2016.
298
+ I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, and Y. Bengio. Generative adversarial nets. In NeurIPS, 2014.
299
+ I. J. Goodfellow, O. Vinyals, and A. M. Saxe. Qualitatively characterizing neural network optimization problems. In ICLR, 2015.
300
+ I. Gulrajani, F. Ahmed, M. Arjovsky, V. Dumoulin, and A. C. Courville. Improved training of wasserstein GANs. In NeurIPS, 2017.
301
+ M. Heusel, H. Ramsauer, T. Unterthiner, B. Nessler, and S. Hochreiter. GANs trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPS, 2017.
302
+ K. Kawaguchi. Deep learning without poor local minima. In NeurIPS, 2016.
303
+ D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. In ICLR, 2015.
304
+ G. Korpelevich. The extragradient method for finding saddle points and other problems. Matecon, 1976.
305
+ A. Krizhevsky and G. Hinton. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.
306
+ A. Krizhevsky, I. Sutskever, and G. E. Hinton. Imagenet classification with deep convolutional neural networks. In NeurIPS, 2012.
307
+ Y. LeCun, C. Cortes, and C. Burges. MNIST handwritten digit database. AT&T Labs [Online]. Available: http://yann.lecun.com/exdb/mnist, 2010.
308
+
309
+ H. Li, Z. Xu, G. Taylor, C. Studer, and T. Goldstein. Visualizing the loss landscape of neural nets. In NeurIPS, 2018a.
310
+ J. Li, A. Madry, J. Peebles, and L. Schmidt. On the limitations of first order approximation in gan dynamics. In ICML, 2018b.
311
+ E. Mazumdar and L. J. Ratliff. On the convergence of gradient-based learning in continuous games. ArXiv, 2018.
312
+ E. V. Mazumdar, M. I. Jordan, and S. S. Sastry. On finding local nash equilibria (and only local nash equilibria) in zero-sum games. arXiv, 2019.
313
+ L. Mescheder, S. Nowozin, and A. Geiger. The numerics of GANs. In NeurIPS, 2017.
314
+ L. Mescheder, A. Geiger, and S. Nowozin. Which Training Methods for GANs do actually Converge? In ICML, 2018.
315
+ T. Miyato, T. Kataoka, M. Koyama, and Y. Yoshida. Spectral normalization for generative adversarial networks. In ICLR, 2018.
316
+ V. Nagarajan and J. Z. Kolter. Gradient descent GAN optimization is locally stable. In NeurIPS, 2017.
317
+ W. Paul. Electromagnetic traps for charged and neutral particles. Reviews of modern physics, 1990.
318
+ B. A. Pearlmutter. Fast exact multiplication by the hessian. Neural computation, 1994.
319
+ A. Radford, L. Metz, and S. Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. In ICLR, 2016.
320
+ L. J. Ratliff, S. A. Burden, and S. S. Sastry. On the characterization of local nash equilibria in continuous games. In IEEE Transactions on Automatic Control, 2016.
321
+ L. Sagun, L. Bottou, and Y. LeCun. Eigenvalues of the hessian in deep learning: Singularity and beyond. arXiv, 2016.
322
+ L. Sagun, U. Evci, V. U. Guney, Y. Dauphin, and L. Bottou. Empirical analysis of the hessian of over-parametrized neural networks. arXiv, 2017.
323
+ T. Salimans, I. Goodfellow, W. Zaremba, V. Cheung, A. Radford, and X. Chen. Improved techniques for training GANs. In NeurIPS, 2016.
324
+ R. Thompson, T. Harmon, and M. Ball. The rotating-saddle trap: A mechanical analogy to rf-electric-quadrupole ion trapping? Canadian journal of physics, 2002.
325
+ F. Verhulst. *Nonlinear differential equations and dynamical systems*. Springer Science & Business Media, 1989.
326
+ Y. Yazici, C.-S. Foo, S. Winkler, K.-H. Yap, G. Piliouras, and V. Chandrasekhar. The unusual effectiveness of averaging in GAN training. In ICLR, 2019.
327
+
328
+ # A PROOF OF THEOREMS AND PROPOSITIONS
329
+
330
+ # A.1 PROOF OF THEOREM 1
331
+
332
+ Let us recall the theorem of interest:
333
+
334
+ Proposition' 1. Let us assume that (6) is an equality and that $\nabla v(\omega^{*})$ is diagonalizable, then there exists a basis $P$ such that the coordinates $\tilde{\omega} (t)\coloneqq P(\omega (t) - \omega^{*})$ have the following behavior:
335
+
336
+ 1. For $\lambda_j \in \mathrm{Sp} \nabla \pmb{v}(\pmb{\omega}^*)$ , $\lambda_j \in \mathbb{R}$ , we observe pure attraction: $\tilde{\omega}_j(t) = e^{-\lambda_j t} \tilde{\omega}_j(0)$ .
337
+ 2. For $\lambda_j \in \mathrm{Sp} \nabla \pmb{v}(\pmb{\omega}^*)$ , $\Re(\lambda_j) = 0$ , we observe pure rotation: $\left[ \begin{array}{c} \tilde{\omega}_j(t) \\ \tilde{\omega}_{j+1}(t) \end{array} \right] = R_{|\lambda_j|t} \left[ \begin{array}{c} \tilde{\omega}_j(0) \\ \tilde{\omega}_{j+1}(0) \end{array} \right]$ .
338
+ 3. Otherwise, we observe both: $\left[ \begin{array}{c}\tilde{\omega}_j(t)\\ \tilde{\omega}_{j + 1}(t) \end{array} \right] = e^{-\operatorname {Re}(\lambda_j)t}R_{\operatorname {Im}(\lambda_j)t}\left[ \begin{array}{c}\tilde{\omega}_j(0)\\ \tilde{\omega}_{j + 1}(0) \end{array} \right].$
339
+
340
+ The matrix $R_{\varphi}$ corresponds to a rotation of angle $\varphi$ . Note that, we re-ordered the eigenvalues such that the complex conjugate eigenvalues form pairs: if $\lambda_j \notin \mathbb{R}$ then $\lambda_{j+1} = \bar{\lambda}_j$ .
341
+
342
+ Proof. The ODE we consider is,
343
+
344
+ $$
345
+ \frac {d \boldsymbol {\omega} (t)}{d t} = \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) \left(\boldsymbol {\omega} (t) - \boldsymbol {\omega} ^ {*}\right) \tag {9}
346
+ $$
347
+
348
+ The solution of this ODE is
349
+
350
+ $$
351
+ \boldsymbol {\omega} (t) = e ^ {- (t - t _ {0}) \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right)} \left(\boldsymbol {\omega} \left(t _ {0}\right) - \boldsymbol {\omega} ^ {*}\right) + \boldsymbol {\omega} ^ {*} \tag {10}
352
+ $$
353
+
354
+ Let us now consider $\lambda$ an eigenvalue of $\mathrm{Sp}(\nabla v(\omega^{*}))$ such that $\mathrm{Re}(\lambda) > 0$ and $\mathrm{Im}(\lambda)\neq 0$ . Since $\nabla v(\omega^{*})$ is a real matrix and $\mathrm{Im}(\lambda)\neq 0$ we know that the complex conjugate $\bar{\lambda}$ of $\lambda$ belongs to $\mathrm{Sp}(\nabla v(\omega^{*}))$ . Let $u_{0}$ be a complex eigenvector of $\lambda$ , then we have that,
355
+
356
+ $$
357
+ \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) \boldsymbol {u} _ {0} = \lambda \boldsymbol {u} _ {0} \quad \Rightarrow \quad \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) \bar {\boldsymbol {u}} _ {0} = \bar {\lambda} \bar {\boldsymbol {u}} _ {0} \tag {11}
358
+ $$
359
+
360
+ and thus $\bar{u}_0$ is an eigenvector of $\bar{\lambda}$ . Now if we set $u_1 \coloneqq u_0 + \bar{u}_0$ and $i u_2 \coloneqq u_0 - \bar{u}_0$ , we have that
361
+
362
+ $$
363
+ e ^ {- t \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right)} \boldsymbol {u} _ {1} = e ^ {- t \lambda} \boldsymbol {u} _ {0} + e ^ {- t \bar {\lambda}} \bar {\boldsymbol {u}} _ {0} = \operatorname {R e} \left(e ^ {- t \lambda}\right) \boldsymbol {u} _ {1} + \operatorname {I m} \left(e ^ {- t \lambda}\right) \boldsymbol {u} _ {2} \tag {12}
364
+ $$
365
+
366
+ $$
367
+ e ^ {- t \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right)} i \boldsymbol {u} _ {2} = e ^ {- t \lambda} \boldsymbol {u} _ {0} - e ^ {- t \bar {\lambda}} \bar {\boldsymbol {u}} _ {0} = i (\operatorname {R e} (e ^ {- t \lambda}) \boldsymbol {u} _ {2} - \operatorname {I m} (e ^ {- t \lambda}) \boldsymbol {u} _ {1}) \tag {13}
368
+ $$
369
+
370
+ Thus if we consider the basis that diagonalizes $\nabla \pmb{v}(\pmb{\omega}^{*})$ and modify the complex conjugate eigenvalues in the way we described right after (11), we get the expected diagonal form in a real basis. Thus there exists $\pmb{P}$ such that
371
+
372
+ $$
373
+ \nabla \boldsymbol {v} \left(\boldsymbol {\omega} ^ {*}\right) = P D P ^ {- 1} \tag {14}
374
+ $$
375
+
376
+ where $D$ is the block diagonal matrix with the block described in Theorem 1.
377
+
378
+ # A.2 BEING A DNE IS NEITHER NECESSARY NOR SUFFICIENT FOR BEING A LSSP
379
+
380
+ Let us first recall Example 1.
381
+
382
+ Example' 1. Let us consider $\mathcal{L}_G$ as a hyperbolic paraboloid (a.k.a., saddle point function) centered in $(1,1)$ where $(1,\varphi)$ is the principal descent direction and $(- \varphi, 1)$ is the principal ascent direction, while $\mathcal{L}_D$ is a simple bilinear objective.
383
+
384
+ $$
385
+ \mathcal {L} _ {G} \left(\theta_ {1}, \theta_ {2}, \varphi\right) = \left(\theta_ {2} - \varphi \theta_ {1} - 1\right) ^ {2} - \frac {1}{2} \left(\theta_ {1} + \varphi \theta_ {2} - 1\right) ^ {2}, \quad \mathcal {L} _ {D} \left(\theta_ {1}, \theta_ {2}, \varphi\right) = \varphi \left(5 \theta_ {1} + 4 \theta_ {2} - 9\right)
386
+ $$
387
+
388
+ We want to show that $(1,1,0)$ is a locally stable stationary point.
389
+
390
+ Proof. The game vector field has the following form,
391
+
392
+ $$
393
+ \boldsymbol {v} \left(\theta_ {1}, \theta_ {2}, \varphi\right) = \left( \begin{array}{c} \left(2 \varphi^ {2} - 1\right) \theta_ {1} - 3 \varphi \theta_ {2} + 2 \varphi + 1 \\ \left(2 - \varphi^ {2}\right) \theta_ {2} - 3 \varphi \theta_ {1} - 2 + \varphi \\ 5 \theta_ {1} + 4 \theta_ {2} - 9 \end{array} \right) \tag {15}
394
+ $$
395
+
396
+ Thus, $(\theta_1^*,\theta_2^*,\varphi^*)\coloneqq (1,1,0)$ is a stationary point (i.e., $\pmb {v}(\theta_1^*,\theta_2^*,\varphi^*) = 0$ ). The Jacobian of the game vector field is
397
+
398
+ $$
399
+ \nabla \boldsymbol {v} \left(\theta_ {1}, \theta_ {2}, \varphi\right) = \left( \begin{array}{c c c} 2 \varphi^ {2} - 1 & - 3 \varphi & 2 - 3 \theta_ {2} \\ - 3 \varphi & 2 - \varphi^ {2} & 1 - 3 \theta_ {1} \\ 5 & 4 & 0 \end{array} \right), \tag {16}
400
+ $$
401
+
402
+ and thus,
403
+
404
+ $$
405
+ \nabla \boldsymbol {v} \left(\theta_ {1} ^ {*}, \theta_ {2} ^ {*}, \varphi^ {*}\right) = \left( \begin{array}{c c c} - 1 & 0 & - 1 \\ 0 & 2 & - 2 \\ 5 & 4 & 0 \end{array} \right). \tag {17}
406
+ $$
407
+
408
+ We can verify that the eigenvalues of this matrix have a positive real part with any solver (the eigenvalues of a $3 \times 3$ matrix always have a closed form). For completeness we provide a proof without using the closed form of the eigenvalues. The eigenvalues of $\nabla v(\theta_1^*,\theta_2^*,\varphi^*)$ are given by the roots of its characteristic polynomial,
409
+
410
+ $$
411
+ \chi (X) := \left| \begin{array}{c c c} X + 1 & 0 & 1 \\ 0 & X - 2 & 2 \\ - 5 & - 4 & X \end{array} \right| = X ^ {3} - X ^ {2} + 11 X - 2. \tag {18}
412
+ $$
413
+
414
+ This polynomial has a real root in $(0,1)$ because $\chi(0) = -2 < 0 < 9 = \chi(1)$ . Thus we know that, there exists $\alpha \in (0,1)$ such that,
415
+
416
+ $$
417
+ X ^ {3} - X ^ {2} + 11 X - 2 = (X - \alpha) \left(X - \lambda_ {1}\right) \left(X - \lambda_ {2}\right). \tag {19}
418
+ $$
419
+
420
+ Then we have the equalities,
421
+
422
+ $$
423
+ \alpha \lambda_ {1} \lambda_ {2} = 2 \tag {20}
424
+ $$
425
+
426
+ $$
427
+ \alpha + \lambda_ {1} + \lambda_ {2} = 1. \tag {21}
428
+ $$
429
+
430
+ Thus, since $0 < \alpha < 1$ , we have that,
431
+
432
+ - If $\lambda_{1}$ and $\lambda_{2}$ are real, they have the same sign $(\lambda_{1}\lambda_{2} = 2 / \alpha >0$ ) and thus are positive $(\lambda_{1} + \lambda_{2} = 1 - \alpha >0)$ .
433
+ - If $\lambda_{1}$ is complex then $\lambda_{2} = \bar{\lambda}_{1}$ and thus, $2\Re (\lambda_1) = \lambda_1 + \lambda_2 = 1 - \alpha >0$
434
+
435
+ ![](images/d5b308671293f2554d31425054b3313353657747d7f9721168c065321b4fe8d8.jpg)
436
+
437
+ Example 1 showed that LSSP did not imply DNE. Let us construct an example where a game has a DNE which is not locally stable.
438
+
439
+ Example 2. Consider the non-zero-sum game with the following respective losses for each player,
440
+
441
+ $$
442
+ \mathcal {L} _ {1} (\theta , \phi) = 4 \theta^ {2} + \left(\frac {1}{2} \phi^ {2} - 1\right) \cdot \theta \quad a n d \quad \mathcal {L} _ {2} (\theta , \phi) = (4 \theta - 1) \phi + \frac {1}{6} \theta^ {3} \tag {22}
443
+ $$
444
+
445
+ This game has two stationary points for $\theta = 0$ and $\phi = \pm 1$ . The Jacobian of the dynamics at these two points are
446
+
447
+ $$
448
+ \nabla \boldsymbol {v} (0, 1) = \left( \begin{array}{l l} 1 & 1 / 2 \\ 2 & 1 / 2 \end{array} \right) \quad \text {a n d} \quad \nabla \boldsymbol {v} (0, - 1) = \left( \begin{array}{l l} 1 & - 1 / 2 \\ 2 & - 1 / 2 \end{array} \right) \tag {23}
449
+ $$
450
+
451
+ Thus,
452
+
453
+ - The stationary point $(0,1)$ is a DNE but $\mathrm{Sp}(\nabla \pmb{v}(0,1)) = \left\{\frac{3 \pm \sqrt{17}}{4}\right\}$ contains an eigenvalue with negative real part and so is not a LSSP.
454
+ - The stationary point $(0, -1)$ is not a DNE but $\operatorname{Sp}(\nabla v(0, -1)) = \left\{\frac{1 \pm i\sqrt{7}}{4}\right\}$ contains only eigenvalues with positive real part and so is a LSSP.
455
+
456
+ # B COMPUTATION OF THE TOP-K EIGENVALUES OF THE JACOBIAN
457
+
458
+ Neural networks usually have a large number of parameters, this usually makes the storing of the full Jacobian matrix impossible. However the Jacobian vector product can be efficiently computed by using the trick from (Pearlmutter, 1994). Indeed it's easy to show that $\nabla \pmb{v}(\pmb{\omega})\pmb{u} = \nabla (\pmb{v}(\pmb{\omega})^T\pmb{u})$
459
+
460
+ To compute the eigenvalues of the Jacobian of the Game, we first compute the gradient $\pmb{v}(\omega)$ over a subset of the dataset. We then define a function that computes the Jacobian vector product using automatic differentiation. We can then use this function to compute the top-k eigenvalues of the Jacobian using the sparse.linalg.eigs functions of the Scipy library.
461
+
462
+ # C EXPERIMENTAL DETAILS
463
+
464
+ # C.1 MIXTURE OF GAUSSIAN EXPERIMENT
465
+
466
+ Dataset. The Mixture of Gaussian dataset is composed of 10,000 points sampled independently from the following distribution $p_{\mathcal{D}}(x) = \frac{1}{2}\mathcal{N}(2,0.5) + \frac{1}{2}\mathcal{N}(-2,1)$ where $\mathcal{N}(\mu, \sigma^2)$ is the probability density function of a 1D-Gaussian distribution with mean $\mu$ and variance $\sigma^2$ . The latent variables $z \in \mathbb{R}^d$ are sampled from a standard Normal distribution $\mathcal{N}(0, I_d)$ . Because we want to use full-batch methods, we sample 10,000 points that we re-use for each iteration during training.
467
+
468
+ Neural Networks Architecture. Both the generator and discriminator are one hidden layer neural networks with 100 hidden units and ReLU activations.
469
+
470
+ WGAN Clipping. Because of the clipping of the discriminator parameters, some components of the discriminator's gradient should not be taken into account. In order to compute the relevant path angle we apply the following filter to the gradient:
471
+
472
+ $$
473
+ \mathbf {1} \left\{\left(\left| \varphi \right| = \mathbf {c}\right) \text {a n d} \left(\operatorname {s i g n} \nabla_ {\varphi} \mathcal {L} _ {\mathbf {D}} (\omega) = - \operatorname {s i g n} \varphi\right) \right\} \tag {24}
474
+ $$
475
+
476
+ where $\varphi$ is clipped between $-c$ and $c$ . If this condition holds for a coordinate of the gradient then it means that after a gradient step followed by a clipping the value of the coordinate will not change.
477
+
478
+ <table><tr><td colspan="2">Hyperparameters for WGAN-GP on MoG</td></tr><tr><td>Batch size</td><td>= 10,000 (Full-Batch)</td></tr><tr><td>Number of iterations</td><td>= 30,000</td></tr><tr><td>Learning rate for generator</td><td>= 1 × 10-2</td></tr><tr><td>Learning rate for discriminator</td><td>= 1 × 10-1</td></tr><tr><td>Gradient Penalty coefficient</td><td>= 1 × 10-3</td></tr></table>
479
+
480
+ <table><tr><td colspan="2">Hyperparameters for NSGAN on MoG</td></tr><tr><td>Batch size</td><td>= 10,000 (Full-Batch)</td></tr><tr><td>Number of iterations</td><td>= 30,000</td></tr><tr><td>Learning rate for generator</td><td>= 1 × 10-1</td></tr><tr><td>Learning rate for discriminator</td><td>= 1 × 10-1</td></tr></table>
481
+
482
+ # C.2 MNIST EXPERIMENT
483
+
484
+ Dataset We use the training part of MNIST dataset LeCun et al. (2010) (50K examples) for training our models, and scale each image to the range $[-1, 1]$ .
485
+
486
+ Architecture We use the DCGAN architecture Radford et al. (2016) for our generator and discriminator, with both the NSGAN and WGAN-GP objectives. The only change we make is that we replace the Batch-norm layer in the discriminator with a Spectral-norm layer Miyato et al. (2018), which we find to stabilize training.
487
+
488
+ # Training Details
489
+
490
+ <table><tr><td colspan="2">Hyperparameters for NSGAN with Adam</td></tr><tr><td>Batch size</td><td>= 100</td></tr><tr><td>Number of iterations</td><td>= 100,000</td></tr><tr><td>Learning rate for generator</td><td>= 2 × 10-4</td></tr><tr><td>Learning rate for discriminator</td><td>= 5 × 10-5</td></tr><tr><td>β1</td><td>= 0.5</td></tr></table>
491
+
492
+ <table><tr><td colspan="2">Hyperparameters for NSGAN with ExtraAdam</td></tr><tr><td>Batch size</td><td>= 100</td></tr><tr><td>Number of iterations</td><td>= 100,000</td></tr><tr><td>Learning rate for generator</td><td>= 2 × 10-4</td></tr><tr><td>Learning rate for discriminator</td><td>= 5 × 10-5</td></tr><tr><td>β1</td><td>= 0.9</td></tr></table>
493
+
494
+ <table><tr><td colspan="2">Hyperparameters for WGAN-GP with Adam</td></tr><tr><td>Batch size</td><td>= 100</td></tr><tr><td>Number of iterations</td><td>= 200,000</td></tr><tr><td>Learning rate for generator</td><td>= 8.6 × 10-5</td></tr><tr><td>Learning rate for discriminator</td><td>= 8.6 × 10-5</td></tr><tr><td>β1</td><td>= 0.5</td></tr><tr><td>Gradient penalty λ</td><td>= 10</td></tr><tr><td>Critic per Gen. iterations λ</td><td>= 5</td></tr></table>
495
+
496
+ <table><tr><td colspan="2">Hyperparameters for WGAN-GP with ExtraAdam</td></tr><tr><td>Batch size</td><td>= 100</td></tr><tr><td>Number of iterations</td><td>= 200,000</td></tr><tr><td>Learning rate for generator</td><td>= 8.6 × 10-5</td></tr><tr><td>Learning rate for discriminator</td><td>= 8.6 × 10-5</td></tr><tr><td>β1</td><td>= 0.9</td></tr><tr><td>Gradient penalty λ</td><td>= 10</td></tr><tr><td>Critic per Gen. iterations λ</td><td>= 5</td></tr></table>
497
+
498
+ Computing Inception Score on MNIST We compute the inception score (IS) for our models using a LeNet classifier pretrained on MNIST. The average IS score of real MNIST data is 9.9.
499
+
500
+ # C.3 PATH-ANGLE PLOT
501
+
502
+ We use the path-angle plot to illustrate the dynamics close to a LSSP. To compute this plot, we need to choose an initial point $\omega$ and an end point $\omega'$. We choose the $\omega$ to be the parameters at initialization, but $\omega'$ can be more subtle to choose. In practice, when we use stochastic gradient methods we typically reach a neighborhood of a LSSP where the norm of the gradient is small. However, due to the stochastic noise, we keep moving around the LSSP. In order to be robust to the choice of the end point $\omega'$, we take multiple close-by points during training that have good performance (e.g., high IS in MNIST). In all of the figures, we compute the path-angle (and path-norm) for all these end points (with the same start point), and we plot the median path-angle (middle line) and interquartile range (shaded area).
503
+
504
+ # C.4 INSTABILITY OF GRADIENT DESCENT
505
+
506
+ For the MoG dataset we tried both the extragradient method (Korpelevich, 1976; Gidel et al., 2019a) and the standard gradient descent. We observed that gradient descent leads to unstable results. In
507
+
508
+ particular, the norm of the gradient has a very large variance compared to extragradient; this is shown in Fig. 7.
509
+
510
+ ![](images/4854bc4f10b6c8d9eb9c4b10cc00f747d5841cb5b429e4702fb89667e5c9520f.jpg)
511
+ Figure 7: The norm of gradient during training for the standard GAN objective. We observe that while extra-gradient reaches low norm which indicates that it has converged, the gradient descent on the contrary doesn't seem to converge.
512
+
513
+ # C.5 ADDITIONAL RESULTS WITH ADAM
514
+
515
+ ![](images/a2cc8e108a304c4fa2ab970a6b90d0ee9421162efdc9b7c3661bdc2ee5ddad85.jpg)
516
+
517
+ ![](images/ac40021f3f12bd7651ee91e966bdc195f49fe8bc3324f32b8ef0cfef76c017ca.jpg)
518
+
519
+ ![](images/521160ec147b811ac89dd88d57b2bb7b6ba9578a0b1ab9a3f42a4a04a649aee3.jpg)
520
+ (a) NSGAN on MNIST, IS: 8.95
521
+ Figure 8: Path-angle and Eigenvalues computed on MNIST with Adam.
522
+
523
+ ![](images/7e4e32acda1fcd083689f9478ece9a88839b869ac1776a71c5d8f5c2a74b27ef.jpg)
524
+ (b) WGAN-GP on MNIST, IS: 9.30
525
+
526
+ ![](images/b89060e5c7cd3e4d37cdcbc5798d62e514bc3520f72b4e65c1c9e721a09f9ed2.jpg)
527
+ Figure 9: Path-angle and Eigenvalues for NSGAN on CIFAR10 computed on CIFAR10 with Adam. We can see that the model has eigenvalues with negative real part, this means that we've actually reached an unstable point.
528
+
529
+ ![](images/fe8e0bc7d073f3113018ffa7847e6c85f1ff963f5094529317526ed9d5dac08b.jpg)
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc60a19b04412bf24e25cc20bec0447449018f7e6056d3bf3b7d560168d115ad
3
+ size 815251
acloserlookattheoptimizationlandscapesofgenerativeadversarialnetworks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf98d42d9890be59675b45247d75dd237f7cec5f366c0f4afd221a38b4cd317c
3
+ size 638468
aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca8a393be7e60341abf7852f0dc7a0939e3794ff0c0c81ad18e3adc1b53692e7
3
+ size 115616
aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa3ef807f4f57ec4eb1a620d5b8931c64f281f20a3c9e8b75ba6f9129a48f50c
3
+ size 150692
aconstructivepredictionofthegeneralizationerroracrossscales/5fc9bbe4-4ad0-43cd-8f35-77d285d04164_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df81adff9705f3d61c459b07e66668e124c0273a5cbe7dfe93c438d9cb4a9344
3
+ size 2808820
aconstructivepredictionofthegeneralizationerroracrossscales/full.md ADDED
@@ -0,0 +1,521 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A CONSTRUCTIVE PREDICTION OF THE GENERALIZATION ERROR ACROSS SCALES
2
+
3
+ Jonathan S. Rosenfeld<sup>1</sup> Amir Rosenfeld<sup>2</sup> Yonatan Belinkov<sup>13</sup> Nir Shavit<sup>145</sup>
4
+
5
+ {jonsr, belinkov, shanir}@csail.mit.edu amir@cse.yorku.ca
6
+
7
+ 1 Massachusetts Institute of Technology 2 York University 3 Harvard University
8
+
9
+ 4 Neural Magic Inc 5 Tel Aviv University
10
+
11
+ # ABSTRACT
12
+
13
+ The dependency of the generalization error of neural networks on model and dataset size is of critical importance both in practice and for understanding the theory of neural networks. Nevertheless, the functional form of this dependency remains elusive. In this work, we present a functional form which approximates well the generalization error in practice. Capitalizing on the successful concept of model scaling (e.g., width, depth), we are able to simultaneously construct such a form and specify the exact models which can attain it across model/data scales. Our construction follows insights obtained from observations conducted over a range of model/data scales, in various model types and datasets, in vision and language tasks. We show that the form both fits the observations well across scales, and provides accurate predictions from small- to large-scale models and data.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ With the success and heightened adoption of neural networks for real world tasks, some questions remain poorly answered. For a given task and model architecture, how much data would one require to reach a prescribed performance level? How big a model would be needed?
18
+
19
+ Addressing such questions is made especially difficult by the mounting evidence that large, deep neural networks trained on large-scale data outperform their smaller counterparts, rendering the training of high performance models prohibitively costly. Indeed, in the absence of practical answers to the above questions, surrogate approaches have proven useful. One such common approach is model scaling, where one designs and compares small-scale models, and applies the obtained architectural principles at a larger scale (e.g., Liu et al., 2018; Real et al., 2018; Zoph et al., 2018). Despite these heuristics being widely used to various degrees of success, the relation between the performance of a model in the small- and large-scale settings is not well understood. Hence, exploring the limitations or improving the efficiency of such methods remains subject to trial and error.
20
+
21
+ In this work we circle back to the fundamental question: what is the (functional) relation between generalization error and model and dataset sizes? Critically, we capitalize on the concept of model scaling in its strictest form: we consider the case where there is some given scaling policy that completely defines how to scale up a model from small to large scales. We include in this context all model parameters, such that traversing from one scale (in which all parameters are known) to another requires no additional resources for specifying the model (e.g., architecture search/design).
22
+
23
+ We empirically explore the behavior of the generalization error over a wide range of datasets and models in vision and language tasks. While the error landscape seems fairly complex at first glance, we observe the emergence of several key characteristics shared across benchmarks and domains. Chief among these characteristics is the emergence of regions where power-law behavior approximates the error well both with respect to data size, when holding model size fixed, and vice versa.
24
+
25
+ Motivated by these observations, we establish criteria which a function approximating the error landscape should meet. We propose an intuitive candidate for such a function and evaluate its quality, both in explaining the observed error landscapes and in extrapolating from small scale (seen) to large scale (unseen) errors. Critically, our functional approximation of the error depends on both
26
+
27
+ model and data sizes. We find that this function leads to a high quality fit and extrapolation. For instance, the mean and standard deviation of the relative errors are under $2\%$ when fitting across all scales investigated and under $5\%$ when extrapolating from a slimmed-down model (1/16 of the parameters) on a fraction of the training data (1/8 of the examples) on the ImageNet (Russakovsky et al., 2015) and WikiText-103 (Merity et al., 2016) datasets, with similar results for other datasets.
28
+
29
+ To the best of our knowledge, this is the first work that provides simultaneously:
30
+
31
+ - A joint functional form of the generalization error landscape—as dependent on both data and model size—with few, interpretable degrees of freedom (section 5).
32
+ - Direct and complete specification (via the scaling policy) of the model configuration attaining said generalization error across model and dataset sizes.
33
+ - Highly accurate approximation of error measurements across model and data scales via the functional form, evaluated on different models, datasets, and tasks (section 6).
34
+ - Highly accurate error prediction from small to large model and data (section 7).
35
+
36
+ We conclude with a discussion of some implications of our findings as a practical and principled tool for understanding network design at small scale and for efficient computation and trade-off design in general. We hope this work also provides a useful empirical leg to stand on and an invitation to search for a theory of generalization error which accounts for our findings.
37
+
38
+ # 2 RELATED WORK
39
+
40
+ Model scaling: A number of studies have explored the effect of model scaling on performance. For instance, image classification networks can be scaled by depth (number of layers; He et al., 2016) or width (number of channels; Zagoruyko & Komodakis, 2016; Howard et al., 2017). More recently, Tan & Le (2019) demonstrated how scaling width, depth, and input resolution has combined positive effects larger than scaling each factor in isolation. However, this relationship has yet to be quantified in a predictive form – by how much will error change with model scaling? In this work, we focus on finding a constructive functional form for determining the model given a specified performance.
41
+
42
+ Data scaling: It has long been recognized that more data improves performance, and various studies report such trends in both computer vision (e.g., Zhu et al., 2012; Sun et al., 2017) and language processing tasks (e.g., Banko & Brill, 2001; Talmor & Berant, 2019). A number of prior studies observed power-law relations between the generalization error and training data size (Cho et al., 2015; Miceli Barone et al., 2017; Johnson et al., 2018). Most relevant to our work, Hestness et al. (2017) explored the effect of data size on the generalization error in vision, language, and speech tasks, and observed a strikingly consistent power-law behavior in a large set of experiments. However, while these studies point to the empirical existence of a power law in terms of data, they do not offer tools for predicting the performance given a specified model. Nor do they offer low-cost methods to specify the model configuration which would attain the power law with data dependency. Indeed, Hestness et al. had to search over models and their configurations at large scale to exhibit their findings, incurring prohibitive computational costs.
43
+
44
+ In contrast, we demonstrate a constructive recipe, where we directly predict the test performance at large scale and specify the full model configuration which attains it (with no need for large-scale search), given performance at small scale.
45
+
46
+ Predicting model performance: Since training models at full data/model scale may be computationally prohibitive, a line of work tries to predict the performance of a given model on a given dataset, without training the model, for example by using a bank of previously trained models, dataset, and their associated performances (Istrate et al., 2019). Others have proposed to estimate performance on small data (Klein et al., 2017) or model sizes (Zoph et al., 2018; Real et al., 2019) in the context of neural architecture search (NAS). In this case, the small-scale evaluation is used to compare models at small cost, to expedite the search process; see Elsken et al. (2019) for a recent survey. Our work complements previous approaches by demonstrating a functional form that can predict large-scale performance from small-scale measurements. Moreover, our method may be integrated in NAS, addressing some of its current limitations (as discussed in section 8).
47
+
48
+ Table 1: The datasets and models used in this work, along with their original training data size and the range of explored scales. For more information, see appendix A.
49
+ (a) Training data size (number of words) and model size (number of parameters excluding word embeddings) for language modeling tasks.
50
+
51
+ <table><tr><td>Dataset</td><td>Size (N)</td><td>Scales (n)</td><td>Base Model</td><td>Size (M)</td><td>Scales (m)</td></tr><tr><td>PTB</td><td>0.9M</td><td rowspan="3">2<sup>-k</sup>N, 0 ≤ k ≤ 5</td><td>AWD-LSTM</td><td>20M</td><td rowspan="3">4<sup>-k</sup>M, 0 ≤ k ≤ 6</td></tr><tr><td>WikiText-2</td><td>2M</td><td>AWD-LSTM</td><td>20M</td></tr><tr><td>WikiText-103</td><td>100M</td><td>Transformer-XL</td><td>41M</td></tr></table>
52
+
53
+ (b) Training data size (number of images) and model size (number of parameters) for image classification tasks.
54
+
55
+ <table><tr><td>Dataset</td><td>Size (N)</td><td>Scales (n)</td><td>Base Model</td><td>Size (M)</td><td>Scales (m)</td></tr><tr><td>ImageNet</td><td>1.2M</td><td>2<sup>-k</sup>N, 0 ≤ k ≤ 6</td><td>ResNet-50</td><td>25.5M</td><td>4<sup>-k</sup>M, 0 ≤ k ≤ 6</td></tr><tr><td>CIFAR10</td><td>60K</td><td></td><td>WRN-44-16</td><td>0.7M</td><td>4<sup>-k</sup>M, -3 ≤ k ≤ 4</td></tr><tr><td>CIFAR100</td><td>60K</td><td rowspan="4">2<sup>-k</sup>N, 0 ≤ k ≤ 5</td><td>WRN-44-16</td><td>0.7M</td><td></td></tr><tr><td>DTD</td><td>5640</td><td>WRN-44-16</td><td>0.7M</td><td>4<sup>-k</sup>M, -2 ≤ k ≤ 4</td></tr><tr><td>Aircraft</td><td>10K</td><td>WRN-44-16</td><td>0.7M</td><td></td></tr><tr><td>UCF101</td><td>13K</td><td>WRN-44-16</td><td>0.7M</td><td></td></tr></table>
56
+
57
+ Theoretical error bounds: Much attention has been given to theoretical explanations of the generalization capabilities of deep neural networks (Neyshabur et al., 2017a;b; Allen-Zhu et al., 2018a;b; Arora et al., 2018). While fully engaging with this literature is beyond our scope, we note that recent studies have derived bounds involving power-law dependencies in both model (Yarotsky, 2018) and data size (Liang et al., 2019). We leave it as an open question for future work to find theoretical explanations for the empirical behavior and the functional form we investigate in this work.
58
+
59
+ # 3 EXPERIMENTAL SETUP
60
+
61
+ Notation: Let $\mathbb{D}_n = \{\pmb{x}_i, y_i\}_{i=1}^n$ denote a labeled (training) dataset with $n$ samples or datapoints. Let $f_m$ denote a neural network whose size is the number of parameters $m$ , such that $\hat{y} = f_m(\pmb{x})$ is the predicted label. Let $\epsilon(n, m)$ be the generalization error as a function of $n$ and $m$ , measured by a performance metric (e.g., top-1 accuracy or cross-entropy loss) on a held-out test set. We refer to this error function as the error landscape.
62
+
63
+ # 3.1 SCALING POLICIES
64
+
65
+ Dataset scaling: We wish to scale datasets while preserving the original distribution. For image classification, we uniformly subsample all classes by a constant ratio, thus preserving the relative sample size per class. We limit the maximal sub-sampling to avoid eradicating any class. For language modeling, where the number of classes (vocabulary items) has a very long tail distribution, we randomly sample sentences such that the total number of sampled words will be a certain fraction of the original dataset. Table 1 reports the data scales we use. In all tasks the held-out test set remains untouched for evaluating the error.
66
+
67
+ Model scaling: We are critically interested in a method where moving across scales is defined by some scaling function, such that no additional significant computation would be incurred. We thus consider the case where the model architecture is given and the model size determines how to scale it. For instance, one may scale width (number of channels in convolutional networks, hidden state size in recurrent networks), depth (number of layers), do compound scaling (Tan & Le, 2019), or more generally define a function tying the model degrees of freedom and size. We focus primarily on width scaling in our experiments; the model scales are reported in Table 1. We also perform selected depth scaling to demonstrate flexibility with respect to the scaling method.
68
+
69
+ ![](images/99025f970685e31980c352b64485908c30c5761e0590bd10214ba5521a05004f.jpg)
70
+ (a) Wiki103 error (cross entropy) landscape.
71
+
72
+ ![](images/c5b5190e81703f1cf15f49f86b2a6f5b680b8ac5087fd5b83229c710d5732912.jpg)
73
+ (b) CIFAR10 error (top1) landscape.
74
+ Figure 1: Error landscapes in log-log-log scale. Each point (blue dot) is the error resulting from training with a model/data configuration $m, n$ . The surface is a linear interpolation between the points, which is then projected on the $(m, \epsilon), (n, \epsilon)$ and $(m, n)$ planes. See Appendix C for details.
75
+
76
+ ![](images/92d5b84b74b0c9004ce3fac7e1790a6198d4da88d3ece0e2e5589fa9dca48ad5.jpg)
77
+ (a) Wiki103 cross entropy vs. data and model size.
78
+
79
+ ![](images/16049b917833181ba767827d3c36675773690fbea389733fd0ddcafdb3d99ab2.jpg)
80
+ Figure 2: Error vs. data size (left part of each subfigure) and model size (right part) for Wiki103 and CIFAR10. Solid dots are measurements, dashed lines are best fit to saturating power-law.
81
+
82
+ ![](images/c42383c5b9c4d8c7591bae753fd4bf2bc16721afb39e44fcf5a7b57201beb5ae.jpg)
83
+ (b) CIFAR10 top1 error vs. data and model size.
84
+
85
+ ![](images/1a7c5ab55b6cf4277ffa5badeafe62ded9ee4035f6777bdff3396e9c5c57628b.jpg)
86
+
87
+ Hyper-parameters: For similar reasons we wish to avoid hyper-parameter search at large scales, and thus avoid the temptation to tune hyper-parameters accordingly (learning rate, regularization, etc.). Therefore, we hold all hyper-parameters fixed. This enables us to construct a functional form that fits the error landscape and can be used to predict the error across scales while completely defining the model attaining it. We consider pros and cons of this approach in the discussion (section 8).
88
+
89
+ # 3.2 TASKS, MODELS, OPTIMIZERS AND DATASETS
90
+
91
+ We experiment with both vision and language tasks. We use 6 benchmark datasets for image classification and 3 for language modeling. For image classification, we train ResNet (He et al., 2016) and WRN models (Zagoruyko & Komodakis, 2016) with stochastic gradient descent (SGD). In section 6.2 we explore the effect of varying architectures and optimizers for a fixed task (CIFAR100), adding VGG16 (Simonyan & Zisserman, 2014) and DenseNet (Huang et al., 2017) models trained with both Adam (Kingma & Ba, 2015) and SGD. For language modeling, we train AWD-LSTM (Merity et al., 2018) and Transformer-XL models (Dai et al., 2019) with SGD and Adam optimizers respectively. Summary statistics are shown in Table 1, along with the range of explored scales. Appendix A gives additional information.
92
+
93
+ # 4 OBSERVATIONS ON THE ERROR LANDSCAPE
94
+
95
+ Figures 1a and 1b respectively show an example test error landscape for width scaling of Transformer-XL on WikiText-103 and WRN-44-16 on CIFAR10. Various additional such landscapes are found in appendix C, showing largely consistent patterns. Examining the error landscapes yields the following observations:
96
+
97
+ # O1 Model Scaling
98
+
99
+ O1.1 For a given dataset size, scaling up the model results in an initial decrease in test error, which then saturates to a level determined by the dataset size. This behavior has been noted by Tan & Le (2019) across varied model scaling methods, although they have not engaged with the dependency on dataset size.
100
+ O1.2 The rate of error decrease with model size appears well approximated by a power-law.
101
+
102
+ These two observations together can be summarized as the following relation:
103
+
104
+ $$
105
+ \epsilon (m, n) \approx b (n) m ^ {- \beta (n)} + c _ {m} (n) \tag {1}
106
+ $$
107
+
108
+ where $b, \beta, c_m$ may depend on the data size $n$ , s.t. as $m$ grows, $\epsilon \rightarrow c_m$ . Example fits to this form (allowing $b, \beta, c_m$ to be fit per $n$ ) are seen in figure 2a (right) and figure 2b (right).
109
+
110
+ # O2 Data scaling
111
+
112
+ O2.1 For a given model size, scaling up the dataset results in an initial increase in performance, which then saturates to a level determined by the model size.
113
+ O2.2 The rate of error decrease with dataset size appears well approximated by a power-law. Hestness et al. (2017) also noted a similar relationship, but did not functionally tie the saturation level to the dataset size.
114
+
115
+ These two observations together can be summarized as the following relation:
116
+
117
+ $$
118
+ \epsilon (m, n) \approx a (m) n ^ {- \alpha (m)} + c _ {n} (m) \tag {2}
119
+ $$
120
+
121
+ where $a, \alpha, c_n$ may depend on the model size $m$ , s.t. as $n$ grows, $\epsilon \to c_n$ . Example fits to this form (allowing $a, \alpha, c_n$ to be fit per $m$ ) are seen in figure 2a (left) and figure 2b (left).
122
+
123
+ O3 Joint properties The behavior of the error when scaling model size while holding data size fixed, and vice versa, extends to the entire error landscape in a well-behaved manner, such that the manifold $\epsilon(m,n)$ is smooth everywhere as a function of both model and data scales.
124
+
125
+ # 5 FUNCTIONAL APPROXIMATION OF THE GENERALIZATION ERROR
126
+
127
+ # 5.1 CRITERIA
128
+
129
+ Motivated by the above observations, we now consider a functional approximation for the error landscape. In particular, let us consider function families meeting the following criteria which augment and restrict our observations:
130
+
131
+ C1 As either model or dataset size goes to zero, the expected performance is equivalent to a random-guess error level $\epsilon_0$ .<sup>2</sup>
132
+ C2 For a given dataset size, scaling up the model will result in an initial increase in performance, which will then saturate, taking the form in equation 1.
133
+ C3 For a given model size, scaling up the dataset will result in an initial increase in performance, which will then saturate, taking the form in equation 2.
134
+ C4 There exists an irreducible error $\epsilon_{\infty}$ , intrinsic to the dataset.
135
+ C5 The function must be smooth everywhere and monotonic non-increasing in terms of model and data size (observation O3).
136
+
137
+ While there are many possible function families meeting the above criteria, below we propose a simple function family for our evaluation. We do not claim that this is in fact the true underlying dependency, but rather that it serves as a good approximation of the error landscape—consistent with these criteria.
138
+
139
+ # 5.2 PROPOSED FUNCTION FAMILY
140
+
141
+ As a first insightful step, consider the implications of satisfying C2 and C3 simultaneously. By examining the limiting behavior as $m$ or $n$ grow, we have:
142
+
143
+ As $m$ grows large:
144
+
145
+ $$
146
+ c _ {m} (n) \approx a (m) n ^ {- \alpha (m)} + c _ {n} (m)
147
+ $$
148
+
149
+ As $n$ grows large:
150
+
151
+ $$
152
+ c _ {n} (m) \approx b (n) m ^ {- \beta (n)} + c _ {m} (n)
153
+ $$
154
+
155
+ Thus, a consistent form satisfying C2 and C3 simultaneously is:
156
+
157
+ $$
158
+ \epsilon (m, n) \approx a (m) n ^ {- \alpha (m)} + b (n) m ^ {- \beta (n)} + c _ {\infty} \tag {3}
159
+ $$
160
+
161
+ where $c_{\infty}$ is a constant not dependent on either $m$ or $n$ .
162
+
163
+ Let us now examine the simplified case where $a, b, \alpha, \beta$ are constant:
164
+
165
+ $$
166
+ \tilde {\epsilon} (m, n) = a n ^ {- \alpha} + b m ^ {- \beta} + c _ {\infty} \tag {4}
167
+ $$
168
+
169
+ where $\alpha \geq 0$ and $\beta \geq 0$ control the global rate at which error decreases with data and model size, respectively, $a > 0$ and $b > 0$ are a form of unit conversion between data and model sizes and error, and $c_{\infty} > 0$ is the asymptotic lower value attainable. This function is a special case of equation 3 and meets criteria C2 and C3 by construction. Importantly C4 and C5 are also met.
170
+
171
+ However, by giving up the dependence of $a, b, \alpha, \beta$ on $m, n$ , this function does not meet criterion C1. We thus need to model the transition from the initial random-guess level to the power-law region. We propose to parameterize the transition using the following envelope (complex) function:
172
+
173
+ $$
174
+ \hat {\epsilon} (m, n) = \epsilon_ {0} \left\| \frac {\tilde {\epsilon} (m , n)}{\tilde {\epsilon} (m , n) - i \eta} \right\| = \epsilon_ {0} \left\| \frac {a n ^ {- \alpha} + b m ^ {- \beta} + c _ {\infty}}{a n ^ {- \alpha} + b m ^ {- \beta} + c _ {\infty} - i \eta} \right\| \tag {5}
175
+ $$
176
+
177
+ where $i = \sqrt{-1}$ . Here the simple pole at $\eta$ controls the transition point from the initial random-guess level $\epsilon_0$ as $(m,n)$ increase. As $(m,n)$ grow, $\tilde{\epsilon} \rightarrow c_{\infty}$ and the final irreducible error $\epsilon_{\infty} \triangleq \epsilon_0 c_{\infty} \eta^{-1}$ is approached. The random-guess error, $\epsilon_0$ , is a known parameter determined by dataset statistics (e.g., $(N_{\text{classes}} - 1) / N_{\text{classes}}$ for a balanced dataset). Note that, due to our choice of rational envelope, we can divide the form in equation 4 by a constant without changing equation 5. Without loss of generality, let us choose $a = 1$ .
178
+
179
+ Note that while the forms in equations 3 and 4 are well motivated, the approach taken for modeling the transition is solely a convenience one. In fact, the transition(s) as function of $m$ and $n$ may be captured in the functional forms of $a, b, \alpha, \beta$ or another envelope mechanism. We leave a more refined investigation of the nature of the transitions to future work.
180
+
181
+ # 6 ERROR LANDSCAPE ESTIMATION
182
+
183
+ We wish to empirically estimate the quality of the proposed functional parameterization as a fit to the true error landscape. Let $\hat{\epsilon}(n, m; \theta)$ be the parametric function family (equation 5) approximating the error landscape $\epsilon(n, m)$ , where $\pmb{\theta} = \{\alpha, \beta, b, c_{\infty}, \eta\}$ . Define the divergence $\delta(n, m; \pmb{\theta})$ as the relative difference between the estimated error $\hat{\epsilon}(m, n; \pmb{\theta})$ and the true error $\epsilon(m, n)$ :
184
+
185
+ $$
186
+ \delta (n, m; \pmb {\theta}) \triangleq \frac {\hat {\epsilon} (m , n ; \pmb {\theta}) - \epsilon (m , n)}{\epsilon (m , n)}
187
+ $$
188
+
189
+ We fit a least squares regression model to find the best parameters minimizing the divergence. In this section, we fit the function using 10-fold cross-validation across all model/data configurations $m$ , $n$ (see Table 1) and evaluate the fit quality. (In the next section, we perform extrapolation experiments, from seen to unseen points.) We perform the fit separately for each dataset and evaluate its quality by the mean $\mu$ and standard deviation $\sigma$ of the divergence $\delta$ over all points $(m,n)$ . See Appendix B.1 for experimental details.
190
+
191
+ As figure 3 shows, estimated test accuracy is highly correlated with actual test accuracy for various datasets, with worst-case values $\mu < 1\%$ and $\sigma < 5\%$ . Note that the number of free parameters is small ( $|\theta| \leq 6$ ) compared to the number of points (42-49 model-data configurations), demonstrating the appropriateness of the proposed function for modeling the complex error landscape.
192
+
193
+ ![](images/1115c844e3e4cbcb311574ddc5b875ca16e0e51bf27094367020aa0b693c7976.jpg)
194
+ (a) Estimated vs. actual cross-entropy loss for various language modeling datasets.
195
+
196
+ ![](images/821c716e86d9cc4c0e5f3452197f8a21be6682d679178730dac1fabe72c1b90e.jpg)
197
+ (b) Estimated vs. actual test error for various image classification datasets.
198
+
199
+ ![](images/19526bef8fd02863ef4e2f89e66bda60b016ae4dbaefd0b19a49399fc16107e2.jpg)
200
+ Figure 3: Error estimation results, using 10-fold cross-validation on all configurations in each dataset. For reference, in blue is the identity line. The legend shows mean $\mu$ and standard deviation $\sigma$ of the divergence $\delta$ ( $\pm$ one std). See Appendix C for the actual and estimated landscapes in each dataset.
201
+
202
+ ![](images/e38010a46a29fb26c1116d3b333ec8d1a311cb4ec3bb572066fc700ecdbe73ec.jpg)
203
+ (a) Error landscape when scaling depth (at constant baseline width).
204
+ (b) Width scaling fit at different constant depths (D).
205
+
206
+ ![](images/9b8199a5370cec728bd326dd1bfb5cddab5a91ce60e98787cb2e1400a507a83b.jpg)
207
+ (c) Depth scaling fit at different constant widths (W).
208
+ Figure 4: Error landscape estimation results on CIFAR10 for width and depth scaling, showing small and comparable fit errors in both cases. Numbers in legends denote mean/variance of the estimation divergence.
209
+
210
+ # 6.1 A PROBE INTO DEPTH SCALING
211
+
212
+ Here we verify that our results extend to another canonical scaling policy, namely depth scaling. Figure 4a shows the error landscape with depth scaling on CIFAR10, exhibiting the same characteristics as width scaling. Figures 4b and 4c show error landscape estimation results for both cases of width and depth scaling, exhibiting small and comparable fit errors (confidence intervals $< 3\%$ ).
213
+
214
+ Since the difference in approximation quality is effectively indistinguishable when scaling depth or width orthogonally, we expect compound scaling to adhere to the same functional form. Indeed, we verified this on the publicly available (model scaling only) results for EfficientNet (Tan & Le, 2019).
215
+
216
+ # 6.2 ON THE VARIETY OF OPTIMIZERS AND ARCHITECTURES
217
+
218
+ Our study covers a deliberate variety of architectures (ResNet, WRN, LSTM, Transformer) and optimizers (Adam, SGD variants), following standard implementations in the literature as recommended for each dataset/model setting; see Appendix A.
219
+
220
+ ![](images/11de342d739e3328247ab0f2f276e62af1be79cc9db2706e54580416c808e546.jpg)
221
+ (a) Illustration.
222
+
223
+ ![](images/56dffbc42317c470f027cc69a6701c3b406dfbe4b1ce63f1675a662f157490c6.jpg)
224
+ (b) Extrapolation on ImageNet
225
+
226
+ ![](images/4a945cb2d7db096b0e67e0e4e063713cf8d4d9f0ca61c603c2f89258444f8413.jpg)
227
+ (c) Extrapolation on WikiText-103.
228
+ Figure 6: Extrapolation results. (a) Illustration of the extrapolation setup, where we fit on a subset of the points (in green) and predict on larger points (in red). (b) and (c) show example results on one configuration in two benchmark datasets. Comprehensive results are given in Appendix D.
229
+
230
+ However, the model/optimizer settings differ in multiple aspects across the different tasks, rendering the comparison of, say, different optimizers, challenging. In this section we verify that the functional form holds when varying the optimizer and/or the architecture on the same task, namely image classification on CIFAR100.
231
+
232
+ In addition to the previously examined setting of WRN with SGD, we add four more settings: two well known architectures (VGG and DenseNet), each trained with both SGD and Adam optimizers. See Appendix A for experimental details. Figure 5 exhibits consistent, accurate, fit values across all architecture/optimizer settings, with mean divergence of $\mu < 1\%$ (std: $\sigma < 6\%$ ; confidence intervals $< 4\%$ ).
233
+
234
+ ![](images/a07a73c4d7390f54d859dc4dc5a2f02595b6b56cfeab7cd29b46f439899c1ed9.jpg)
235
+ Figure 5: CIFAR100 Error estimation results with three architectures (WRN, VGG, DenseNet) and two optimizers (SGD, Adam).
236
+
237
+ # 7 EXTRAPOLATION
238
+
239
+ In this section, we evaluate the ability of our functional approximation to extrapolate beyond seen model/data configurations. The primary question we ask is: can we predict the error of a large model/data configuration from the errors of smaller-scale model/data configurations? To do this, we fit the least squares regression on a subset of the configurations and predict the error on larger, unseen configurations. More formally, let $(m_i,n_j)$ denote a given model/data configuration. We first estimate parameters $\theta_{ij}$ by fitting the function in equation 5 on all points of at most that size ( $m\leq m_i,n\leq n_j$ ). Then we predict the error $\epsilon (m,n)$ in all points corresponding to larger configurations ( $m > m_i,n > n_j$ ) using estimated $\theta_{ij}$ . Finally, we measure the divergence $\delta (m,n)$ between the estimated error and the actual error at all larger configurations. This process is illustrated in figure 6a.
240
+
241
+ Figure 6b shows the results of one such extrapolation experiment, on ImageNet. In this case, we have fit the functional form on all configurations of model size $m \leq m_i = M / 16$ and data size $n \leq n_j = N / 8$ , and predicted the error on all larger configurations. As the figure shows, the extrapolation is highly accurate, with a mean divergence of $\mu = 4.5\%$ (std: $\sigma = 4.7\%$ ). Figure 6c reports a similar experiment on WikiText-103. Here, again, we see very good extrapolation, with a mean divergence of $\mu = 0.5\%$ (std: $\sigma = 1.7\%$ ). Note that each extrapolation is run 10 times with different random initializations of $\theta_{ij}$ in the least squares with negligible effect on the prediction.
242
+
243
+ In practice, we may be interested in extrapolation quality with different subsets of configurations. Appendix D provides detailed extrapolation results on multiple subsets of configurations, for both vision and language datasets. Generally, the extrapolation performs well unless the fit is ill-posed, which may be caused by a lack of signal in the region of the initial "random-guess" level, or by degenerate cases such as having fewer measurements than the number of free parameters in $\theta$ .
244
+
245
+ # 8 DISCUSSION AND CONCLUSION
246
+
247
+ In this work, through insights gained by the joint examination of the dependencies of generalization error on both model and data size, we arrive at criteria for functions consistent with the form of the generalization error under a given scaling policy. We consider one such function and find it to be in very good agreement with the actual behavior of the error landscape. Indeed, the agreement is strong enough that extrapolation from small to large scale becomes feasible: the function predicts the behavior of the generalization error in practice for the practical case of scaling models and data. We discuss several example implications of knowing such a functional form.
248
+
249
+ Small-scale network development: At the core of small fidelity searches is the notion of performance rank comparison between models. However, small scale and large scale ranks are not assured to be consistent. If indeed a functional form such as empirically found in this work holds very generally, then in contrast, one can safely assess scaling rank between models at small scale, with the assurance that it remains consistent. This suggests that one would be well served by searching over scaling policies; a pertinent example of such a success is Tan & Le (2019). The functional form also explains the limitation of small-scale search: once reaching the random-guess error level, where the sensitivity to scaling vanishes, the informativeness of ranking diminishes. Finally, the functional form allows direct usage of differentiable methods for NAS.
250
+
251
+ Principled design: Knowing the error landscape function facilitates reasoning about the choice of $(m,n)$ attaining a specified error level. In other words, for any given error level, one can solve Eq. 5 for $m,n$ based on small-scale measurements. Thus, one can quantitatively answer design questions regarding the expected (in particular, large-scale) relations between $m,n$ , and $\epsilon$ . In fact, Eq. 5 provides direct answers to questions such as "how much data would one require to reach a prescribed performance level?" or "how big a model would be needed?" Imposing constraints is also straightforward. For instance, consider the following question: "What is the maximal model size possibly needed (useful), when the data is limited in size, $n = n_{lim}$ (for a given model architecture and scaling policy)?" For a fixed dataset size, model scaling eventually contributes marginally to error reduction and becomes negligible when $bm^{-\beta} \ll n_{lim}^{-\alpha}$ (Eq. 5). Define the relative contribution threshold $T$ as satisfying $T = \frac{n_{lim}^{-\alpha}}{bm_{max}^{-\beta}}$ . (For example, $T = 10$ .) Then the maximal useful model size meeting threshold $T$ is:
252
+
253
+ $$
254
+ m _ {m a x} (T) = (b T) ^ {1 / \beta} n _ {l i m} ^ {\alpha / \beta}
255
+ $$
256
+
257
+ Similarly, the maximal useful amount of data for a limited-size model $m_{lim}$ is:
258
+
259
+ $$
260
+ n _ {m a x} (T) = (1 / b T) ^ {1 / \alpha} m _ {l i m} ^ {\beta / \alpha}
261
+ $$
262
+
263
+ Moreover, Eq. 5 allows for complex design trade-offs. Generally, given some design-tradeoff cost function $C(m,n,\epsilon)$ , one can minimize such cost s.t. Eq. 5. For example, consider the case of optimizing for efficient computation which has both practical and environmental importance (Schwartz et al., 2019). Since the number of FLOPs during training is $\propto m\cdot n$ (for constant epoch budget), the trade-off cost function may be formulated as $C(\mathrm{FLOPS},\epsilon) = C(mn,\epsilon)$ . Further, since constant error contour is very well approximated by $c = \frac{1}{n^{\alpha}} +\frac{b}{m^{\beta}}$ (Eq. 5), dataset and models may be scaled with optimal resource efficiency with no effect on performance by solving for:
264
+
265
+ $$
266
+ \underset {m, n} {\operatorname {a r g m i n}} \quad m \cdot n \qquad \text {s . t .} \quad c = \frac {1}{n ^ {\alpha}} + \frac {b}{m ^ {\beta}}
267
+ $$
268
+
269
+ The solution gives us the optimal-computational-efficiency ratio of model to data size: $\frac{b\beta}{\alpha}\frac{n^{\alpha}}{m^{\beta}} = 1$
270
+
271
+ Limitations: We have made a few simplifying assumptions in our choice of approximating function, in particular in how to model the transition from the initial random-guess error level and the union of the random-guess level of the two scenarios (small model with large data and large model with small data). We leave a more detailed examination of the behavior of the transitions from random-guess error levels and refinements of the functional form to future work.
272
+
273
+ Critically, the restrictive nature of our scaling framework (all parameters and hyperparameters described by a policy) is both a blessing and a challenge. The blessing comes in fulfilling the goal of finding simultaneously both the form of the generalization error and the full specification of the model and hyperparameters that attain it across scales. The challenge is that we have demonstrated in this work only the case of constant hyper-parameters. We conjecture that the relation between model configuration and hyperparameter choice (Zela et al., 2018) may entail the potential to formulate hyperparameter-scaling policies similar in nature to the model-scaling policies, and that these too fall under the scope of the form we find in this work. This too will be the subject of future work.
274
+
275
+ We hope that this work will bring the actual functional form of the generalization error in this practical case of scaling to the fore, both in practice and as an empirical leg to stand on in the quest for its theoretical origins.
276
+
277
+ # ACKNOWLEDGMENTS
278
+
279
+ We thank Alexander Rakhlin, Alexander Madry, Kai Xiao, Lu Mi, Vikas Garg, Dan Alistarh, and Tommi Jaakkola for discussions and their help. We also thank the anonymous reviewers for their valuable feedback. J.R. was partly supported by the Eli and Dorothy Berman Fellowship as well as grants NSF IIS-1447786, NSF CCF-1563880 and China-Singapore Suzhou Industrial Park. A.R. was partially supported by the Air Force Office of Scientific Research USA (FA9550-18-1-0054) through a grant to John K. Tsotsos. Y.B. was partly supported by the Harvard Mind, Brain, and Behavior Initiative.
280
+
281
+ # REFERENCES
282
+
283
+ Zeyuan Allen-Zhu, Yuanzhi Li, and Yingyu Liang. Learning and generalization in overparameterized neural networks, going beyond two layers. arXiv preprint arXiv:1811.04918, 2018a.
284
+ Zeyuan Allen-Zhu, Yuanzhi Li, and Zhao Song. On the convergence rate of training recurrent neural networks. arXiv preprint arXiv:1810.12065, 2018b.
285
+ Sanjeev Arora, Rong Ge, Behnam Neyshabur, and Yi Zhang. Stronger generalization bounds for deep nets via a compression approach. arXiv preprint arXiv:1802.05296, 2018.
286
+ Michele Banko and Eric Brill. Mitigating the paucity-of-data problem: Exploring the effect of training corpus size on classifier performance for natural language processing. In Proceedings of the first international conference on Human language technology research, pp. 1-5. Association for Computational Linguistics, 2001.
287
+ Hakan Bilen, Basura Fernando, Efstratios Gavves, Andrea Vedaldi, and Stephen Gould. Dynamic image networks for action recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3034-3042, 2016.
288
+ James Bradbury, Stephen Merity, Caiming Xiong, and Richard Socher. Quasi-recurrent neural networks. In International Conference on Learning Representations, 2017.
289
+ Junghwan Cho, Kyewook Lee, Ellie Shin, Garry Choy, and Synho Do. How much data is needed to train a medical image deep learning system to achieve necessary high accuracy? arXiv preprint arXiv:1511.06348, 2015.
290
+ Mircea Cimpoi, Subhransu Maji, Iasonas Kokkinos, Sammy Mohamed, and Andrea Vedaldi. Describing textures in the wild. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3606-3613, 2014.
291
+
292
+ Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc Le, and Ruslan Salakhutdinov. Transformer-XL: Attentive language models beyond a fixed-length context. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 2978-2988, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1285. URL https://www.aclweb.org/anthology/P19-1285.
293
+ Thomas Elsken, Jan Hendrik Metzen, and Frank Hutter. Neural architecture search: A survey. Journal of Machine Learning Research, 20(55):1-21, 2019.
294
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. pp. 770-778, 2016.
295
+ Joel Hestness, Sharan Narang, Newsha Ardalani, Gregory Diamos, Heewoo Jun, Hassan Kianinejad, Md Patwary, Mostofa Ali, Yang Yang, and Yanqi Zhou. Deep learning scaling is predictable, empirically. arXiv preprint arXiv:1712.00409, 2017.
296
+ Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8): 1735-1780, 1997.
297
+ Elad Hoffer, Itay Hubara, and Daniel Soudry. Fix your classifier: the marginal value of training the last weight layer. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=S1Dh8Tg0-.
298
+ Andrew G Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861, 2017.
299
+ Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. Densely connected convolutional networks. 1(2):3, 2017.
300
+ Roxana Istrate, Florian Scheidegger, Giovanni Mariani, Dimitrios Nikolopoulos, Costas Bekas, and A Cristiano I Malossi. Tapas: Train-less accuracy predictor for architecture search. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 3927-3934, 2019.
301
+ Mark Johnson, Peter Anderson, Mark Dras, and Mark Steedman. Predicting accuracy on large datasets from smaller pilot data. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 450-455, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/P18-2072. URL https://www.aclweb.org/anthology/P18-2072.
302
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015.
303
+ Aaron Klein, Stefan Falkner, Simon Bartels, Philipp Hennig, and Frank Hutter. Fast bayesian optimization of machine learning hyperparameters on large datasets. In Artificial Intelligence and Statistics, pp. 528-536, 2017.
304
+ Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. Technical report, Citeseer, 2009.
305
+ Tengyuan Liang, Alexander Rakhlin, and Xiyu Zhai. On the risk of minimum-norm interpolants and restricted lower isometry of kernels. arXiv preprint arXiv:1908.10292, 2019.
306
+ Hanxiao Liu, Karen Simonyan, and Yiming Yang. Darts: Differentiable architecture search. arXiv preprint arXiv:1806.09055, 2018.
307
+ Subhransu Maji, Esa Rahtu, Juho Kannala, Matthew Blaschko, and Andrea Vedaldi. Fine-grained visual classification of aircraft. arXiv preprint arXiv:1306.5151, 2013.
308
+ Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. arXiv preprint arXiv:1609.07843, 2016.
309
+ Stephen Merity, Nitish Shirish Keskar, and Richard Socher. Regularizing and optimizing LSTM language models. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=SyyGPPOTZ.
310
+
311
+ Antonio Valerio Miceli Barone, Barry Haddow, Ulrich Germann, and Rico Sennrich. Regularization techniques for fine-tuning in neural machine translation. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 1489-1494, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/D17-1156. URL https://www.aclweb.org/anthology/D17-1156.
312
+ Tomáš Mikolov, Martin Karafiát, Lukáš Burget, Jan Černocký, and Sanjeev Khudanpur. Recurrent neural network based language model. In Eleventh Annual Conference of the International Speech Communication Association, 2010.
313
+ Behnam Neyshabur, Srinadh Bhojanapalli, David McAllester, and Nati Srebro. Exploring generalization in deep learning. In Advances in Neural Information Processing Systems, pp. 5947-5956, 2017a.
314
+ Behnam Neyshabur, Srinadh Bhojanapalli, and Nathan Srebro. A pac-bayesian approach to spectrally-normalized margin bounds for neural networks. arXiv preprint arXiv:1707.09564, 2017b.
315
+ Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in PyTorch. In NIPS Autodiff Workshop, 2017.
316
+ E Real, A Aggarwal, Y Huang, and QV Le. Aging evolution for image classifier architecture search. In AAAI Conference on Artificial Intelligence, 2019.
317
+ Esteban Real, Alok Aggarwal, Yanping Huang, and Quoc V Le. Regularized evolution for image classifier architecture search. arXiv preprint arXiv:1802.01548, 2018.
318
+ Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. In Advances in Neural Information Processing Systems, pp. 506-516, 2017.
319
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. International journal of computer vision, 115(3):211-252, 2015.
320
+ Roy Schwartz, Jesse Dodge, Noah A Smith, and Oren Etzioni. Green ai. arXiv preprint arXiv:1907.10597, 2019.
321
+ Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014.
322
+ Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012.
323
+ Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In Proceedings of the IEEE international conference on computer vision, pp. 843-852, 2017.
324
+ Alon Talmor and Jonathan Berant. MultiQA: An empirical investigation of generalization and transfer in reading comprehension. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 4911-4921, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1485. URL https://www.aclweb.org/anthology/P19-1485.
325
+ Mingxing Tan and Quoc Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, pp. 6105-6114, 2019.
326
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems 30, pp. 5998-6008. Curran Associates, Inc., 2017. URL http://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf.
327
+
328
+ Zifeng Wu, Chunhua Shen, and Anton van den Hengel. Wider or deeper: Revisiting the resnet model for visual recognition. arXiv preprint arXiv:1611.10080, 2016.
329
+ Dmitry Yarotsky. Optimal approximation of continuous functions by very deep relu networks. arXiv preprint arXiv:1802.03620, 2018.
330
+ Sergey Zagoruyko and Nikos Komodakis. Wide residual networks. arXiv preprint arXiv:1605.07146, 2016.
331
+ Arber Zela, Aaron Klein, Stefan Falkner, and Frank Hutter. Towards automated deep learning: Efficient joint neural architecture and hyperparameter search. arXiv preprint arXiv:1807.06906, 2018.
332
+ Xiangxin Zhu, Carl Vondrick, Deva Ramanan, and Charless C Fowlkes. Do we need more training data or better models for object detection? In BMVC, volume 3, pp. 5. CiteSeer, 2012.
333
+ Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V Le. Learning transferable architectures for scalable image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 8697-8710, 2018.
334
+
335
+ # A DATASETS AND MODELS
336
+
337
+ # A.1 IMAGE CLASSIFICATION
338
+
339
+ # A.1.1 DATASETS
340
+
341
+ We evaluated our predictions on several popular image classification datasets: ImageNet (Russakovsky et al., 2015): a large-scale recognition benchmark consisting of natural images of 1000 object categories with 1.28M training images spread roughly uniformly over the categories. It has 50K validation and 100K testing images. It has been the most popular large-scale benchmark for image classification methods for the better part of the last decade. CIFAR10/100 (Krizhevsky et al., 2009): 60K natural RGB images of 10 classes (100 for CIFAR100) with a train/test split of 50K/10K. For each of the following datasets, we use the version collated, resized, and split into train/validation/test sets by Rebuffi et al. (2017). DTD (Cimpoi et al., 2014): a texture database of 47 categories and 5640 images. Aircraft (Maji et al., 2013): 10K images of 100 different aircraft classes. UCF101 (Soomro et al., 2012): originally a video action recognition dataset, converted using the method of Bilen et al. (2016) into a single image per video. It contains 13,320 images of 101 action classes.
342
+
343
+ # A.1.2 MODELS
344
+
345
+ We experiment with four models for image classification. We use different variants of the popular ResNet architecture (He et al., 2016) in the main experiments. For ImageNet we use ResNet-50 and build on the code from the PyTorch framework (Paszke et al., 2017) to vary the model width. For all other datasets we use WRN-44-16 (Wu et al., 2016) of varying widths, modified from the implementation of Hoffer et al. (2018).
346
+
347
+ Scaling the models' width is performed by multiplying the number of channels in each convolutional layer and the width of the hidden linear layers by a constant factor and rounding to the nearest integer. The ranges of width scales (and data scales) for the main experiments are detailed in Table 1b.
348
+
349
+ In section 6.2, we perform width scaling for two additional architectures, VGG16bn (Simonyan & Zisserman, 2014) and DenseNet $(\mathrm{L} = 40,\mathrm{k} = 32)$ (Huang et al., 2017). The VGG and DenseNet models were also modified for width scaling from the implementation of Hoffer et al. (2018). The model scales in this case are $4^{-k}$ , $0\leq k\leq 5$ , for both VGG and DenseNet.
350
+
351
+ Depth-scaling, in the CIFAR10 case (section 6.1), is performed by appending extra layers within each block.
352
+
353
+ # A.1.3 TRAINING
354
+
355
+ In the main experiments, training is done via SGD with a momentum of 0.9, weight decay of 1e-4 and initial learning rate of 0.1. For ImageNet we train for 90 epochs, decreasing the learning rate by a multiplicative factor of 0.1 after 30 and after 60 epochs. We use a batch size of 16. For all other vision datasets we use a batch-size of 128. We begin training with a learning rate of 0.1, run for 200 epochs, and reduce by a multiplicative factor of 0.1 after 80, 120, and 160 epochs.
356
+
357
+ For the VGG and DenseNet experiments on CIFAR100 in section 6.2, we train with both SGD and Adam optimizers. We train VGG for 170 epochs and Densenet for 300 epochs. Adam hyperparameters are default, with an initial learning rate of 1e-3. When training with SGD, we retain initial learning rate, batch size, momentum, and weight-decay, as in the main experiment (at 0.1, 128, 0.9, and 1e-4 respectively) and follow standard stepped learning rate schedules: For VGG, learning rate multiplicative factor of 0.1 after 80, 120, and 160 epochs; For DenseNet, learning rate multiplicative factor of 0.1 after 150 and 225 epochs.
358
+
359
+ # A.2 LANGUAGE MODELING
360
+
361
+ # A.2.1 DATASETS
362
+
363
+ We evaluate on several datasets commonly used for (word-level) language modeling: Penn Treebank (Mikolov et al., 2010), WikiText-2 (Bradbury et al., 2017), and WikiText-103 (Merity et al., 2016). The PTB is a relatively small language modeling dataset of news texts, with a vocabu
364
+
365
+ lary of 10K unique words and about $900\mathrm{K} / 70\mathrm{K} / 80\mathrm{K}$ training/validation/test words. WikiText-2 is drawn from Wikipedia articles and it is both larger and richer, with a vocabulary of 33K words and 2M/210K/240K training/validation/test words. WikiText-103 is also based on Wikipedia, but larger still, with a vocabulary of 270K words and 100M training words (and the same validation and test sets as WikiText-2).
366
+
367
+ # A.2.2 MODELS
368
+
369
+ We experiment with two standard models for language modeling: Transformer-XL (Dai et al., 2019) and AWD-LSTM (Merity et al., 2018). Transformer-XL is a recent language modeling architecture that is based on transformer self-attention (Vaswani et al., 2017), but modified to better learn dependencies beyond a fixed length by adding a segment-level recurrence mechanism. It has achieved state-of-the-art results on multiple benchmarks. We use the official PyTorch implementation<sup>4</sup> with their base configuration: 16 layers, embedding size of 410, inner dimension of 2100 in the fully-connected layers, and 10 attention heads. Training is done with Adam. See the implementation for other details. For scaling experiments, we decimate the inner dimension. We use Transformer-XL for WikiText-103.
370
+
371
+ AWD-LSTM is a long short-term memory (Hochreiter & Schmidhuber, 1997) language model with adaptive weight averaging. We use the official implementation<sup>5</sup> with the recommended configuration: 3 layers, embedding size of 400, and hidden state size of 1150. Training is done with SGD. We use AWD-LSTM for PTB and WikiText-2 and follow the recommended settings for these two datasets. For scaling experiments, we decimate the hidden state size.
372
+
373
+ # B ERROR ESTIMATION EXPERIMENT
374
+
375
+ # B.1 EXPERIMENTAL DETAILS
376
+
377
+ In the experiment described in section 6, we fit a least squares regression model to find the best parameters minimizing the divergence $\delta(m,n)$ - evaluated at configurations $m,n$ as in Table 1:
378
+
379
+ $$
380
+ \pmb {\theta} ^ {*} = \underset {\pmb {\theta}} {\arg \min} \sum_ {n, m} | \delta (m, n; \pmb {\theta}) | ^ {2}
381
+ $$
382
+
383
+ We quantify the quality of the fit by the mean $\mu$ and standard deviation $\sigma$ of the fitted divergence by performing standard 10-fold cross validation over all points $(m,n)$ with confidence intervals reported as $\pm 1$ std over the folds.
384
+
385
+ # B.2 FOUND THETA VALUES
386
+
387
+ Table 2: Optimal values of $\theta$ as found by the least squares regression fitting the functional form.
388
+ (a) Image classification (fitting top 1 error).
389
+
390
+ <table><tr><td></td><td>α</td><td>β</td><td>b</td><td>c∞</td><td>η</td></tr><tr><td>ImageNet</td><td>0.75</td><td>0.61</td><td>0.76</td><td>3.63</td><td>18.50</td></tr><tr><td>CIFAR10</td><td>0.66</td><td>0.53</td><td>5.87·10-02</td><td>7.14·10-14</td><td>19.77</td></tr><tr><td>CIFAR100</td><td>0.70</td><td>0.51</td><td>0.15</td><td>0.71</td><td>6.93</td></tr><tr><td>DTD</td><td>0.40</td><td>1.16</td><td>4.30·10-05</td><td>1.27·10-09</td><td>0.85</td></tr><tr><td>Aircraft</td><td>1.10</td><td>0.83</td><td>3.47·10-03</td><td>5.16·10-10</td><td>1.13</td></tr><tr><td>UCF101</td><td>0.93</td><td>0.54</td><td>4.68·10-02</td><td>1.16·10-09</td><td>2.98</td></tr></table>
391
+
392
+ (b) Language modeling (fitting cross entropy loss).
393
+
394
+ <table><tr><td></td><td>α</td><td>β</td><td>b</td><td>c∞</td><td>η</td><td>ε0</td></tr><tr><td>PTB</td><td>0.81</td><td>0.34</td><td>0.15</td><td>5.00</td><td>6.27</td><td>6.10</td></tr><tr><td>WikiText-2</td><td>1.01</td><td>0.22</td><td>0.99</td><td>8.23</td><td>10.38</td><td>6.21</td></tr><tr><td>WikiText-103</td><td>0.74</td><td>0.56</td><td>0.33</td><td>9.04</td><td>16.34</td><td>6.60</td></tr></table>
395
+
396
+ # C ADDITIONAL ERROR LANDSCAPE MEASUREMENTS AND ESTIMATIONS
397
+
398
+ In this appendix, we provide error landscape measurements and estimations for all datasets, corresponding to the experiment in section 6. The results are shown in 3D graphs similar to figure 1. In each such graph, the z-axis is the logarithm of the generalization error as a function of two independent variables: the model size $m$ and the data size $n$ .
399
+
400
+ The 3D graph is deliberately portrayed in log-log-log scale, as we cover a very large range of data scales and model scales and a correspondingly wide range of errors. This view is useful when one wishes to evaluate large dynamic ranges (simultaneously very large and very small values) and is especially vivid in portraying power-law-like dependencies; a power law naturally forms a straight line in a log-log view.
401
+
402
+ In each figure, subfigure (a) shows the measured error landscape in log-log-log scale, where each point (blue dot) is the error resulting from training with a model/data configuration $m$ , $n$ . Subfigure (b) shows the best-fit estimated error landscape. The surface is a linear interpolation between the points, which is then projected on the model-error $(m, \epsilon)$ , data-error $(n, \epsilon)$ , and model-data $(m, n)$ planes. The contour plots on each one of these planes are the projections of the error landscape surface, and are useful in considering the behavior of the surface when holding one dimension constant.
403
+
404
+ We call to attention several interesting observations on the datasets explored:
405
+
406
+ - As quantified rigorously in section 6, the fits perform well across error ranges. In these surfaces, one also gets a qualitative sense of the fit adequacy across the wide ranges of the dataset and model scales directly. While it is perhaps slightly difficult to assess the surface directly, a helpful view is to consider the similarity between the projections of the actual and estimated surfaces.
407
+ - With increasing model size, indeed typically the error does remain saturated. However, in one of our tested datasets (figure 12) there was a renewed slight increase. We verify that this is indeed over-fitting, in the sense that there is no corresponding increase in the training error. We note that the functional form we find can actually be used to veer clear of the $m$ , $n$ regions where such over-fitting may occur.
408
+ - The simplifying approach taken by considering the random guess levels (and associated transitions) for small models or small data as identical, seems to work fairly well with some deviation apparent by examining figure 15. Indeed the simplification can hold well for balanced datasets, but need not for imbalanced ones such as in the task of language modeling. Thus, a relaxation of this simplification is expected to be important conceptually and practically.
409
+
410
+ ![](images/29d2ef6bd3062e979351599eb71195e7e9b1487f5f8b88d9f3efd94069b9c19c.jpg)
411
+ (a) Actual error landscape.
412
+
413
+ ![](images/7c6aee0338c7a5b8b0f2242bf3dc420d9e28e174586b7d72ae3575ad832b4885.jpg)
414
+ (b) Estimated error landscape.
415
+
416
+ ![](images/20705807486b8e59bda4723e4073f8617cb46d97223e5c8ffc12bbefcbf26a7e.jpg)
417
+ (a) Actual error landscape.
418
+
419
+ ![](images/470bd289e2dfc390bcb01fb0d3243e2958691d0cb7735a24952f4cf0eeff1876.jpg)
420
+ Figure 7: ImageNet error landscape.
421
+ (b) Estimated error landscape.
422
+
423
+ ![](images/907a399ee186174ac1cd76ee925fd8d609f78ac7b856b65ba1f34f5263fbfe68.jpg)
424
+ (a) Actual error landscape.
425
+ Figure 9: CIFAR100 error landscape.
426
+
427
+ ![](images/f1a56d0df66e1e9a50232fe5f247a6a673d079bd3969b2a921f3b4fd622c1643.jpg)
428
+ Figure 8: CIFAR10 error landscape.
429
+ (b) Estimated error landscape.
430
+
431
+ ![](images/1503e3b78ebfa761cb479f113bd5d5f56d7ea064a66a0c594e0152fde61a2670.jpg)
432
+ (a) Actual error landscape.
433
+
434
+ ![](images/c78f2281d1a427ee2ae46af63f6e124f72f37ed2719d430a8892c57dfb2882d5.jpg)
435
+ (b) Estimated error landscape.
436
+
437
+ ![](images/9e7463b381dc188a2fcbf2faed0d4b3170dfa4adc751c9d14e9374c1111729d9.jpg)
438
+ (a) Actual error landscape.
439
+
440
+ ![](images/6167e222a1520890911ded53c32e393f50fa9d6984443df1331f381461667d3d.jpg)
441
+ Figure 10: DTD error landscape.
442
+ (b) Estimated error landscape.
443
+
444
+ ![](images/c83ac190b623757e58a885a6a9ec2ac648d64557119cd9bc7f4abe5db71704fc.jpg)
445
+ (a) Actual error landscape.
446
+ Figure 12: UCF101 error landscape.
447
+
448
+ ![](images/b8a89e43ee6a72ffffeca284adbd61b47b3647fcbadc986d988d25c83706c355.jpg)
449
+ Figure 11: Aircraft error landscape.
450
+ (b) Estimated error landscape.
451
+
452
+ ![](images/542210d63b1e7c1f3f2719d5a5014b7c6d25ec3ce6af86f85bc3d2b671d9c3fa.jpg)
453
+ (a) Actual error landscape.
454
+
455
+ ![](images/089f050cfb7586921b930c48907a4cef2fd339330fbf20ff97f7af666298da7c.jpg)
456
+ (b) Estimated error landscape.
457
+
458
+ ![](images/03e4037e5cc1126ae9d19366451556129ea11bfefe260eee9687adffd6b65a71.jpg)
459
+ (a) Actual error landscape.
460
+
461
+ ![](images/8f5fb490d971c0b69eecee10cdcdae42e6fa2ba64fa7d6df447d8d962fd1faad.jpg)
462
+ Figure 13: PTB error landscape.
463
+ (b) Estimated error landscape.
464
+
465
+ ![](images/cd690521260e955985ca24ef63172149471b606ef0abb8ebf4efc058186fec15.jpg)
466
+ (a) Actual error landscape.
467
+ Figure 15: WikiText-103 error landscape.
468
+
469
+ ![](images/afdc24780be51407fc3d00f9d4aa3cd14c5fd6b95da479e21be003593b108af0.jpg)
470
+ Figure 14: WikiText-2 error landscape.
471
+ (b) Estimated error landscape.
472
+
473
+ # D ADDITIONAL EXTRAPOLATION RESULTS
474
+
475
+ Here we provide detailed extrapolation results, for all datasets. All figures are structured in a similar way. Each subplot shows estimated (y-axis) vs. actual error (x-axis) (0 to 1 scale on both axes). Each subplot is located at the coordinate of the maximal data and model given for the task of performing the fit to the functional form in equation 5. This is the point at the top-right corner of the green dots in the illustration in figure 6a. The target is to find the error-landscape values for unseen, larger scales of both model and data (red points in the same illustration). Going from left to right in each figure indicates observed measurements of the error from models of an increasing fraction w.r.t the full size. Going from bottom to top indicates observed measurements of the error from dataset sizes of an increasingly large fraction of the full dataset.
476
+
477
+ In each subplot, every point shows the estimated vs. actual error on a model-data configuration. Points that were given for fitting the function are colored in green, while unseen points that were not used are in red. The red points show the estimation error vs. actual error when extrapolating to all larger models and data sizes. In each subplot, the mean and standard deviation over all divergences $\delta$ at target points are given in text.
478
+
479
+ Each experiment fit of the parameters was repeated 100 times, with different random initializations of $\theta$ . The shaded bands show one standard deviation across these runs.
480
+
481
+ The quality of the extrapolation is critically dependent on the signal provided in the (green) fitted points. Two limiting factors are evident by examining the figures below, which both play a role in the well-posedness of the solution:
482
+
483
+ - The proximity to the initial random guess level. Only upon transitioning from the initial error plateau does meaningful signal about the scaling rates become available. Indeed, for scales still within or close to the initial error region, one sees poor extrapolation results; see figures 18, 19, and 21, and the vivid origin of this phenomenon by examining figures 11, 10, and 12.
484
+ - A second source of ill-posedness is tied to the number of configurations used for the estimation of $\theta$ . Clearly, when this is small, one cannot expect the extrapolation to be stable. In fact, at least two measurements in each scaling dimension (model/data) are needed, and no less than the number of parameters in $\theta$ in total. Indeed, for all the plots in this appendix, the smallest scale of $m, n$ is omitted from the graph such that the lowermost row and leftmost column span exactly two model and data scales correspondingly. Of course, there is nothing tying directly the number of points and scale of configurations measured, and one can decouple these two factors by taking closer spaced samples at small scale.
485
+ - When both the above factors are not limiting the measurement, one readily sees that for divergences of no more than a few percent, it is sufficient to measure model/data configurations which are far-ranged from the configurations which one wishes to extrapolate to
486
+
487
+ ![](images/b70d8b6bc75bd430a540e5c2bc12a2e2e3f8b53ad785a90cd63a65630d0c8b07.jpg)
488
+ imagenet
489
+ Figure 16: ImageNet extrapolation results.
490
+
491
+ ![](images/b6e6f3f8adc7dd81c0efbf33009d32030d53dac7b777b4c91d17e36e8e0f6466.jpg)
492
+ decathlon_cifar100
493
+ Figure 17: CIFAR100 Extrapolation Results
494
+
495
+ ![](images/a0f5481b6afe8aecd43cb4bbf745dd42ce405a14211397212865249f61285ea3.jpg)
496
+ decathlon_aircraft
497
+ Figure 18: Aircraft extrapolation results.
498
+
499
+ ![](images/72f0954c5aa3a66b80997fe1725bcf333030aa6aa54976142f445acf6d3f43e8.jpg)
500
+ decathlon_dtd
501
+ Figure 19: DTD Results
502
+
503
+ ![](images/720937b919957381b5c05fe8055bb9b28dd24e3306c961edb70806d92ec4fddb.jpg)
504
+ cifar10
505
+ Figure 20: CIFAR10 extrapolation results.
506
+
507
+ ![](images/e4049867b170e4c7f3f87e2db3979eca254d9c814147840e52e38e4bda967e16.jpg)
508
+ decathlon.ucf101
509
+ Figure 21: UCF101 extrapolation results.
510
+
511
+ ![](images/564dbae400b49b300652fab58d6579380f023c4c82507ab28e518d9907eed2b5.jpg)
512
+ PTB
513
+ Figure 22: PTB extrapolation results.
514
+
515
+ ![](images/8640e5f04113fb14729bdbd5b7c75a0f291ae236e2142283403b1eef83cfe694.jpg)
516
+ wiki2
517
+ Figure 23: WikiText-2 extrapolation results.
518
+
519
+ ![](images/90b36bcf5315eb5ad671edaf04ad9a6dd8cd6a4465e7cfbf306c27fafb09d7f6.jpg)
520
+ wiki103
521
+ Figure 24: WikiText-103 extrapolation results.
aconstructivepredictionofthegeneralizationerroracrossscales/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57053c331e1bf8c0791e5b41967100161e1975f66b1e647ccf9a50e350d072a5
3
+ size 2147791
aconstructivepredictionofthegeneralizationerroracrossscales/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4a07c018d1618bd21163765cacecd04d49c39ebc8d97baaa197e3cabeef26cd
3
+ size 690240
acriticalanalysisofselfsupervisionorwhatwecanlearnfromasingleimage/34fda455-8a2c-410c-a1d4-5e379b1bc611_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:574a5dbc30c002b507afc8f93cadd4745742c2fe746c4eac4f083f3245713ca2
3
+ size 87564
acriticalanalysisofselfsupervisionorwhatwecanlearnfromasingleimage/34fda455-8a2c-410c-a1d4-5e379b1bc611_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:344e0417d2a50785ccd987ece67d664e28d4bc6475874d4b04b336d0af5812d2
3
+ size 109281