SlowGuess committed on
Commit
d33e49b
·
verified ·
1 Parent(s): 04df119

Add Batch 774a55c9-b20b-41ca-b973-8d533f6f9150

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_content_list.json +3 -0
  2. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_model.json +3 -0
  3. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_origin.pdf +3 -0
  4. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/full.md +352 -0
  5. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/images.zip +3 -0
  6. alatentmorphologymodelforopenvocabularyneuralmachinetranslation/layout.json +3 -0
  7. albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_content_list.json +3 -0
  8. albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_model.json +3 -0
  9. albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_origin.pdf +3 -0
  10. albertalitebertforselfsupervisedlearningoflanguagerepresentations/full.md +353 -0
  11. albertalitebertforselfsupervisedlearningoflanguagerepresentations/images.zip +3 -0
  12. albertalitebertforselfsupervisedlearningoflanguagerepresentations/layout.json +3 -0
  13. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_content_list.json +3 -0
  14. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_model.json +3 -0
  15. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_origin.pdf +3 -0
  16. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/full.md +268 -0
  17. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/images.zip +3 -0
  18. amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/layout.json +3 -0
  19. andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_content_list.json +3 -0
  20. andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_model.json +3 -0
  21. andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_origin.pdf +3 -0
  22. andthebitgoesdownrevisitingthequantizationofneuralnetworks/full.md +232 -0
  23. andthebitgoesdownrevisitingthequantizationofneuralnetworks/images.zip +3 -0
  24. andthebitgoesdownrevisitingthequantizationofneuralnetworks/layout.json +3 -0
  25. anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_content_list.json +3 -0
  26. anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_model.json +3 -0
  27. anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_origin.pdf +3 -0
  28. anexponentiallearningrateschedulefordeeplearning/full.md +0 -0
  29. anexponentiallearningrateschedulefordeeplearning/images.zip +3 -0
  30. anexponentiallearningrateschedulefordeeplearning/layout.json +3 -0
  31. aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_content_list.json +3 -0
  32. aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_model.json +3 -0
  33. aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_origin.pdf +3 -0
  34. aprobabilisticformulationofunsupervisedtextstyletransfer/full.md +269 -0
  35. aprobabilisticformulationofunsupervisedtextstyletransfer/images.zip +3 -0
  36. aprobabilisticformulationofunsupervisedtextstyletransfer/layout.json +3 -0
  37. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_content_list.json +3 -0
  38. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_model.json +3 -0
  39. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_origin.pdf +3 -0
  40. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/full.md +421 -0
  41. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/images.zip +3 -0
  42. asignalpropagationperspectiveforpruningneuralnetworksatinitialization/layout.json +3 -0
  43. asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_content_list.json +3 -0
  44. asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_model.json +3 -0
  45. asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_origin.pdf +3 -0
  46. asymptoticsofwidenetworksfromfeynmandiagrams/full.md +0 -0
  47. asymptoticsofwidenetworksfromfeynmandiagrams/images.zip +3 -0
  48. asymptoticsofwidenetworksfromfeynmandiagrams/layout.json +3 -0
  49. atstabilitysedgehowtoadjusthyperparameterstopreserveminimaselectioninasynchronoustrainingofneuralnetworks/69a3b900-5a48-447f-89ee-4bb1ead753e3_content_list.json +3 -0
  50. atstabilitysedgehowtoadjusthyperparameterstopreserveminimaselectioninasynchronoustrainingofneuralnetworks/69a3b900-5a48-447f-89ee-4bb1ead753e3_model.json +3 -0
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9b10538faa51164ffe928c52c75a053122ed9ecc078d69bd48d15c234f10603
3
+ size 92048
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e71030d6f6fab38daaeb19804044103d1c1d9752f74e019eb76493b1ce5394c4
3
+ size 111384
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/cd7bcd2e-8ef7-4536-aede-1ef1edb818ec_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca52c73b42b142acc50b9ca82c4cc2b3714e2a6f4ba9608a60f96fdd80a30fed
3
+ size 833888
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/full.md ADDED
@@ -0,0 +1,352 @@
1
+ # A LATENT MORPHOLOGY MODEL FOR OPEN-VOCABULARY NEURAL MACHINE TRANSLATION
2
+
3
+ Duygu Ataman*
4
+
5
+ University of Zurich
6
+
7
+ ataman@cl.uzh.ch
8
+
9
+ Wilker Aziz
10
+
11
+ University of Amsterdam w.aziz@uva.nl
12
+
13
+ Alexandra Birch
14
+
15
+ University of Edinburgh a.birch@ed.ac.uk
16
+
17
+ # ABSTRACT
18
+
19
+ Translation into morphologically-rich languages challenges neural machine translation (NMT) models with extremely sparse vocabularies where atomic treatment of surface forms is unrealistic. This problem is typically addressed by either pre-processing words into subword units or performing translation directly at the level of characters. The former is based on word segmentation algorithms optimized using corpus-level statistics with no regard to the translation task. The latter learns directly from translation data but requires rather deep architectures. In this paper, we propose to translate words by modeling word formation through a hierarchical latent variable model which mimics the process of morphological inflection. Our model generates words one character at a time by composing two latent representations: a continuous one, aimed at capturing the lexical semantics, and a set of (approximately) discrete features, aimed at capturing the morphosyntactic function, which are shared among different surface forms. Our model achieves better accuracy in translation into three morphologically-rich languages than conventional open-vocabulary NMT methods, while also demonstrating a better generalization capacity under low to mid-resource settings.
20
+
21
+ # 1 INTRODUCTION
22
+
23
+ Neural machine translation (NMT) models are conventionally trained by maximizing the likelihood of generating the target side of a bilingual parallel corpus of observations one word at a time conditioned on their full observed context. NMT models must therefore learn distributed representations that accurately predict word forms in very diverse contexts, a process that is highly demanding in terms of training data as well as the network capacity. Under conditions of lexical sparsity, which includes both the case of unknown words and the case of known words occurring in surprising contexts, the model is likely to struggle. Such adverse conditions are typical of translation involving morphologically-rich languages, where any single root may lead to exponentially many different surface realizations depending on its syntactic context. Such highly productive processes of word formation lead to many word forms being rarely, if ever, observed with a particular set of morphosyntactic attributes. The standard approach to overcome this limitation is to pre-process words into subword units that are shared among words, which are, in principle, more reliable as they are observed more frequently in varying contexts (Sennrich et al., 2016; Wu et al., 2016). One drawback related to this approach, however, is that the estimation of the subword vocabulary relies on word segmentation methods optimized using corpus-dependent statistics, disregarding any linguistic notion of morphology and the translation objective. This often produces subword units that are semantically ambiguous as they might be used in far too many lexical and syntactic contexts (Ataman et al., 2017). Moreover, in this approach, a word form is then generated by prediction of multiple subword units, which makes generalizing to unseen word forms more difficult due to the possibility that a subword unit necessary to reconstruct a given word form may be unlikely in a given context. To alleviate the sub-optimal effects of using explicit segmentation and generalize better to new morphological forms, recent studies explored the idea of extending NMT to model translation directly at
24
+
25
+ the level of characters (Kreutzer & Sokolov, 2018; Cherry et al., 2018), which, in turn, has been shown to require comparably deeper networks, as the network would then need to learn longer-distance grammatical dependencies (Sennrich, 2017).
26
+
27
+ In this paper, we explore the benefits of explicitly modeling variation in surface forms of words using techniques from deep latent variable modeling in order to improve translation accuracy for low-resource and morphologically-rich languages. Latent variable models allow us to inject inductive biases relevant to the task, which, in our case, is word formation during translation. In order to formulate the process of morphological inflection, we design a hierarchical latent variable model which translates words one character at a time based on word representations learned compositionally from sub-lexical components. In particular, for each word, our model generates two latent representations: i) a continuous-space dense vector aimed at capturing the lexical semantics of the word in a given context, and ii) a set of (approximately) discrete features aimed at capturing that word's morphosyntactic role in the sentence. We then see inflection as decoding a word form, one character at a time, from a learned composition of these two representations. By forcing the model to encode each word representation in terms of a more compact set of latent features, we encourage them to be shared across contexts and word forms, thus facilitating generalization under sparse settings. We evaluate our method in translating English into three morphologically-rich languages each with a distinct morphological typology: Arabic, Czech and Turkish, and show that our model is able to obtain better translation accuracy and generalization capacity than conventional approaches to open-vocabulary NMT.
28
+
29
+ # 2 NEURAL MACHINE TRANSLATION
30
+
31
+ In this paper, we use recurrent NMT architectures based on the model developed by Bahdanau et al. (2014). The model essentially estimates the conditional probability of translating a source sequence $x = \langle x_1, x_2, \ldots, x_m \rangle$ into a target sequence $y = \langle y_1, y_2, \ldots, y_l \rangle$ via an exact factorization:
32
+
33
+ $$
34
+ p (y \mid x, \theta) = \prod_{i = 1}^{l} p\left(y_{i} \mid x, y_{< i}, \theta\right) \tag {1}
35
+ $$
36
+
37
+ where $y_{<i}$ stands for the sequence preceding the $i$ th target word. At each step of the sequence, a fixed neural network architecture maps its inputs, the source sentence and the target prefix, to the probability of the $i$ th target word observation in context. In order to condition on the source sentence fully, this network employs an embedding layer and a bi-directional recurrent neural network (bi-RNN) based encoder. Conditioning on the target prefix $y_{<i}$ is implemented using a recurrent neural network (RNN) based decoder, and an attention mechanism which summarises the source sentence into a context vector $\mathbf{c}_i$ as a function of a given prefix (Luong et al., 2015). Given a parallel training set $\mathcal{D}$ , the parameters $\theta$ of the network are estimated to attain a local minimum of the negative log-likelihood function $\mathcal{L}(\theta|\mathcal{D}) = -\sum_{x,y \sim \mathcal{D}} \log p(y|x,\theta)$ via stochastic gradient-based optimization (Bottou & Cun, 2004).
38
+
39
+ Atomic parameterization estimates the probability of generating each target word $y_{i}$ in a single shot:
40
+
41
+ $$
42
+ p \left(y _ {i} \mid x, y _ {< i}, \theta\right) = \frac {\exp \left(\mathbf {E} _ {y _ {i}} ^ {\top} \mathbf {h} _ {i}\right)}{\sum_ {e = 1} ^ {v} \exp \left(\mathbf {E} _ {e} ^ {\top} \mathbf {h} _ {i}\right)}, \tag {2}
43
+ $$
44
+
45
+ where $\mathbf{E} \in \mathbb{R}^{v \times d}$ is the target embedding matrix and the decoder output $\mathbf{h}_i \in \mathbb{R}^d$ represents $x$ and $y_{<i}$ . Clearly, the size $v$ of the target vocabulary plays an important role in determining the complexity of the model, which creates an important bottleneck when translating into low-resource and morphologically-rich languages due to the sparsity in the lexical distribution.
46
+
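+ To make Eq. (2) concrete, the following minimal sketch (PyTorch; all tensor names and sizes are illustrative, not taken from the paper's implementation) scores every vocabulary entry against the decoder state and normalizes with a softmax:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ v, d = 32000, 512                    # illustrative vocabulary size and decoder dimension
+ E = torch.randn(v, d)                # target embedding matrix E in R^{v x d}
+ h_i = torch.randn(d)                 # decoder output h_i representing x and y_{<i}
+
+ logits = E @ h_i                     # one score E_e^T h_i per vocabulary entry
+ p_word = F.softmax(logits, dim=-1)   # Eq. (2): distribution over all v target words
+ ```
+
+ The normalization over all $v$ entries is what makes this output layer costly and data-hungry for the large, sparse vocabularies discussed above.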
47
+ Recent studies approached this problem by performing NMT with subword units, a popular one of which is based on the Byte-Pair Encoding algorithm (BPE; Sennrich et al., 2016), which finds the optimal description of a corpus vocabulary by iteratively merging the most frequent character sequences. Atomic parameterization could also be used to model translation at the level of characters, which is found to be advantageous in generalizing to morphological variations (Cherry et al., 2018).
48
+
49
+ Hierarchical parameterization further factorizes the probability of a target word in context:
50
+
51
+ $$
52
+ p \left(y _ {i} \mid x, y _ {< i}, \theta\right) = \prod_ {j = 1} ^ {l _ {i}} p \left(y _ {i, j} \mid x, y _ {< i}, y _ {i, < j}, \theta\right) \tag {3}
53
+ $$
54
+
55
+ where the $i$ th word $y_{i} = \langle y_{i,1},\dots ,y_{i,l_{i}}\rangle$ is seen as a sequence of $l_{i}$ characters. Generation follows one character at a time, each with probability computed by a fixed neural network architecture with varying inputs, namely, the source sentence $x$ , the target prefix $y_{< i}$ , and the prefix $y_{i,< j}$ of characters already generated for that word. In this case there are two recurrent cells, one updated at the boundary of each token, much like in the standard case, and another updated at the character level. Luong & Manning (2016) propose hierarchical parameterization to compute the probability $p(y_i|x,y_{< i},\theta)$ for unknown words, while for known words they use the atomic parameterization. In this paper, we use the hierarchical parameterization method for generating all target words, where we also augment the input embedding layer with a character-level bi-RNN, which computes each word representation $\mathbf{y}_i$ as a composition of the embeddings of their characters (Ling et al., 2015).
56
+
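+ As a rough illustration of the hierarchical parameterization in Eq. (3), the sketch below generates one target word character by character from a word-level decoder state; the module names, dimensions and special symbols are hypothetical, and the full model would also include attention and the character-level input bi-RNN described above.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ char_vocab, char_dim, hidden = 64, 128, 512       # illustrative sizes
+ char_emb = nn.Embedding(char_vocab, char_dim)
+ char_rnn = nn.GRUCell(char_dim, hidden)           # character-level recurrent cell
+ char_out = nn.Linear(hidden, char_vocab)          # output layer over characters
+
+ def generate_word(h_i, bos_id=1, eow_id=2, max_len=30):
+     """Greedily decode one word from the word-level state h_i of shape (1, hidden)."""
+     state, prev, chars = h_i, torch.tensor([bos_id]), []
+     for _ in range(max_len):
+         state = char_rnn(char_emb(prev), state)   # p(y_{i,j} | x, y_{<i}, y_{i,<j})
+         prev = char_out(state).argmax(dim=-1)     # greedy choice of the next character
+         if prev.item() == eow_id:                 # end-of-word symbol closes the word
+             break
+         chars.append(prev.item())
+     return chars, state
+ ```
+
+ A word-level recurrent cell would then be updated once per generated word, mirroring the two-cell structure described above.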
57
+ # 3 A LATENT MORPHOLOGY MODEL (LMM) FOR LEARNING WORD REPRESENTATIONS
58
+
59
+ The application of a hierarchical structure for learning word representations in language modeling (Vania & Lopez, 2017) or semantic role labeling (Sahin & Steedman, 2018) has shown that such representations encode many cues about the morphological features of words by establishing a mapping between phonetic units and lexical context. Although it can provide an alternative solution to open-vocabulary NMT by potentially alleviating the need for subword segmentation, the quality of word representations learned by a hierarchical model is still highly dependent on the amount of observations (Sahin & Steedman, 2018; Ataman et al., 2019), since the training data is essential in properly modeling the lexical distribution. On the other hand, the process of word formation, particularly morphological inflection, has many properties that remain universal across languages, where a word is typically composed of a lemma, representing its lexical semantics, and a distinct combination of categorical inflectional features expressing the word's syntactic role in the phrase or sentence. In this paper, we propose to exploit this universal structure in order to enforce an inductive bias on the prior distribution of words and allow the hierarchical parameterization model to properly learn lexical representations under conditions of data sparsity.
60
+
61
+ # 3.1 GENERATIVE MODEL
62
+
63
+ Our generative LMM for NMT formulates word formation in terms of a stochastic process, where each word is generated one character at a time by composing two latent representations: a continuous vector aimed at capturing the lemma, and a set of sparse features aimed at capturing the inflectional features. The motivation for using a stochastic model is twofold. First, deterministic models are by definition unimodal: when presented with the same input (the same context) they always produce the same output. When we model the word formation process, it is reasonable to expect a larger degree of ambiguity, that is, for the same context (e.g. a noun prefix), we may continue by inflecting the word differently depending on the (latent) mode of operation we are at (e.g. generating nominative, accusative or dative noun). Second, in stochastic models, the choice of distribution gives us a mechanism to favour a particular type of representation. In our case, we use sparse distributions for inflectional features to accommodate the fact that morphosyntactic features are discrete in nature. Our latent variable model is an instance of a variational auto-encoder (VAE; Kingma & Welling, 2013) inspired by the model of Zhou & Neubig (2017) for morphological reinflation.
64
+
65
+ Generation of the $i$ th word starts by sampling a Gaussian-distributed representation in context. This requires predicting the Gaussian location $\mathbf{u}_i$ and scale $\mathbf{s}_i$ vectors, $^1$
66
+
67
+ $$
68
+ Z_{i} \mid x, y_{<i} \sim \mathcal{N}\left(\mathbf{u}_{i}, \operatorname{diag}\left(\mathbf{s}_{i} \odot \mathbf{s}_{i}\right)\right)
69
+ $$
70
+
71
+ $$
72
+ \mathbf{u}_{i} = \operatorname{dense}\left(\mathbf{h}_{i}; \theta_{\mathrm{u}}\right) \tag{4}
73
+ $$
74
+
75
+ $$
76
+ \mathbf{s}_{i} = \zeta\left(\operatorname{dense}\left(\mathbf{h}_{i}; \theta_{\mathrm{s}}\right)\right)
77
+ $$
78
+
79
+ ![](images/88048b9b2890461a9152485f9b37fc30627cdaca5ef267a59b2d5a252452bca8.jpg)
80
+ Figure 1: LMM for computing word representations while translating the sentence '... went home' into Turkish ('eve': '(to) home', 'gitti': '(he/she/it) went'). The character-level decoder is initialized with the attentional vector $\mathbf{h}_i$ computed by the attention mechanism using the current context $\mathbf{c}_i$ and the word representation $\mathbf{t}_i$ as in Luong & Manning (2016).
81
+
82
+ where prediction of the location (in $\mathbb{R}^d$ ) and scale (in $\mathbb{R}_{>0}^d$ ) from the word-level decoder hidden state $\mathbf{h}_i$ (which represents $x$ and $y_{<i}$ ) is performed by two dense layers, and the scale values are ensured to be positive with the softplus ( $\zeta$ ) activation.
83
+
84
+ Generation then proceeds by sampling a $K$ -dimensional vector $f_{i}$ of sparse scalar features (see §3.2) conditioned on the source $x$ , the target prefix $y_{< i}$ , and the sampled lemma $z_{i}$ . We model sampling of $f_{i}$ conditioned on $z_{i}$ in order to capture the insight that inflectional transformations typically depend on the category of a lemma. Having sampled $f_{i}$ and $z_{i}$ , the representation of the $i$ th target word is computed by a transformation of $z_{i}$ and $f_{i}$ , i.e. $\mathbf{t}_i = \mathrm{dense}([z_i,f_i];\theta_{\mathrm{comp}})$ .
85
+
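+ The following sketch mirrors Eq. (4) and the composition $\mathbf{t}_i = \mathrm{dense}([z_i, f_i]; \theta_{\mathrm{comp}})$ using the reparameterization trick; layer names are hypothetical, and the dimensions (a 150-dimensional lemma vector, 10 features) are simply the values reported later in §4.3.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ d_hidden, d_lemma, n_feat = 512, 150, 10              # sizes taken from Section 4.3 for illustration
+ loc_layer   = nn.Linear(d_hidden, d_lemma)            # theta_u
+ scale_layer = nn.Linear(d_hidden, d_lemma)            # theta_s
+ compose     = nn.Linear(d_lemma + n_feat, d_hidden)   # theta_comp
+
+ h_i = torch.randn(1, d_hidden)                        # word-level decoder state
+ u_i = loc_layer(h_i)                                  # Gaussian location
+ s_i = F.softplus(scale_layer(h_i))                    # strictly positive scale
+ z_i = u_i + s_i * torch.randn_like(s_i)               # z_i ~ N(u_i, diag(s_i * s_i)), reparameterized
+
+ f_i = torch.rand(1, n_feat)                           # stand-in for the sparse features of Section 3.2
+ t_i = compose(torch.cat([z_i, f_i], dim=-1))          # word representation t_i = dense([z_i, f_i])
+ ```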
86
+ As shown in Figure 1, our model generates each word character by character auto-regressively by conditioning on the word representation $\mathbf{t}_i$ predicted by the LMM, the current context $\mathbf{c}_i$ , and the previously generated characters following the hierarchical parameterization. See Algorithm 1 for details on generation.
87
+
88
+ Input: model parameters $\theta$ , latent lemma $z_{i}$ , latent morphological attributes $f_{i}$ , observed character sequence $\langle y_{i,1},\ldots ,y_{i,l_i}\rangle$ if training or placeholders if test, decoder state $\mathbf{h}_i$ , and context vector $\mathbf{c}_i$
89
+
90
+ Result: updated decoder hidden state, prediction (a word), probability of prediction (for loss)
+
+ initialization;
91
+
92
+ $\mathbf{t}_i = \mathrm{dense}([z_i,f_i];\theta_{\mathrm{comp}})$
93
+
94
+ initialize char-rnn with a projection of $[\mathbf{t}_i,\mathbf{c}_i]$
95
+
96
+ for $j < l_{i}$ and $j < \max$ do
97
+
98
+ ```txt
99
+ compute output layer from char-rnn state;
+ if training then
+     set prediction to observation $y_{i,j}$;
+ else
+     set prediction to arg max of output softmax layer;
+ end
+ assess log-probability of prediction;
+ update word-level RNN decoder with prediction;
100
+ ```
101
+
102
+ end
103
+
104
+ Algorithm 1: Word generation: in training the word is observed, thus we only update the decoder and assess the probability of the observation; at test time, we use the mean values of the distributions to represent the most likely values for $z$ and $f$ and populate predictions with beam search.
105
+
106
+ # 3.2 SPARSE FEATURES
107
+
108
+ Since each target word $y_{i}$ may have multiple inflectional features, ideally, we would like $f_{i}$ to be $K$ feature indicators, which could be achieved by sampling from $K$ independent Bernoulli distributions parameterized in context. The problem with this approach is that sampling Bernoulli outcomes is non-differentiable, thus, their training requires gradient estimation via REINFORCE (Williams, 1992) and sophisticated variance reduction techniques. An alternative approach that has recently become popular is to use relaxations such as the Concrete distribution or Gumbel-Softmax (Maddison et al., 2017; Jang et al., 2017) in combination with the straight-through estimator (ST; Bengio et al., 2013). This is based on the idea of relaxing the discrete variable from taking on samples in the discrete set $\{0,1\}$ to taking on samples in the continuous set $(0,1)$ using a distribution for which a reparameterization exists (e.g. Gumbel). Then, a non-differentiable activation (e.g. a threshold function) maps continuous outcomes to discrete ones. ST simply ignores the discontinuous activation in the backward pass, i.e. it assumes the Jacobian is the identity matrix. This does lead to biased estimates of the gradient of the loss, which is in conflict with the requirements behind stochastic optimization (Robbins & Monro, 1951).
109
+
110
+ An alternative presented by Louizos et al. (2018) achieves a different compromise, it gets rid of bias at the cost of mixing both sparse and dense outcomes. The idea is to obtain a continuous sample $c \in (0, 1)$ from a distribution for which a reparameterization exists and stretch it to a continuous support $(l, r) \supset (0, 1)$ using a simple linear transformation $s = l + (r - l)c$ . A rectifier is then employed to map the negative outcomes to 0 and the positive outcomes larger than one to 1, i.e. $f = \min(1, \max(0, s))$ . The rectifier is only non-differentiable at $s = 0$ and at $s = 1$ , however, because the stretched variable $s$ is sampled from a continuous distribution, the chance of sampling $s = 0$ and $s = 1$ is essentially 0. This stretched-and-rectified distribution allows: i) the sampling procedure to become differentiable with respect to the parameters of the distribution, ii) to sample sparse outcomes with an unbiased estimator, and iii) to calculate the probability of sampling $f = 0$ and $f = 1$ in closed form as a function of the parameters of the underlying distribution, which corresponds to the probability of sampling $s < 0$ and $s > 1$ , respectively.
111
+
112
+ In their paper, Louizos et al. (2018) used the BinaryConcrete (or Gumbel-Sigmoid) as the underlying continuous distribution, the sparsity of which is controlled via a temperature parameter. However, in our study, we found this parameter difficult to predict, since it is very hard to allow a neural network to control its value without unstable gradient updates. Instead, we opt for a slight variant by Bastings et al. (2019) based on the Kumaraswamy distribution (Kumaraswamy, 1980), a two-parameter distribution that closely resembles a Beta distribution and is sparse whenever its (strictly positive) parameters are between 0 and 1. In the context of text classification, Bastings et al. (2019) show this stretch-and-rectify technique to work better than methods based on REINFORCE.
113
+
114
+ For each token $y_{i}$ , we sample $K$ independent Kumaraswamy variables in context,
115
+
116
+ $$
117
+ C_{i,k} \mid x, y_{<i}, z_{i} \sim \operatorname{Kuma}\left(a_{i,k}, b_{i,k}\right) \quad k = 1, \dots, K \tag{5}
118
+ $$
119
+
120
+ $$
121
+ [\mathbf{a}_{i}, \mathbf{b}_{i}] = \zeta\left(\operatorname{dense}\left([z_{i}, \mathbf{h}_{i}]; \theta_{\mathrm{ab}}\right)\right)
122
+ $$
123
+
124
+ which makes a continuous random vector $c_{i}$ in the support $(0,1)^{K}$ . We then stretch-and-rectify the samples via $f_{i,k} = \min(1, \max(0, l + (r - l)c_{i,k}))$ , making $f_{i}$ a random vector in the support $[0,1]^{K}$ . The probability that $f_{i,k}$ is exactly 0 is
125
+
126
+ $$
127
+ \pi_{i,k}^{\{0\}} = \int_{0}^{\frac{-l}{r-l}} \operatorname{Kuma}\left(c \mid a_{i,k}, b_{i,k}\right) \mathrm{d}c \tag{6a}
128
+ $$
129
+
130
+ and the probability that $f_{i,k}$ is exactly 1 is
131
+
132
+ $$
133
+ \pi_{i,k}^{\{1\}} = 1 - \int_{0}^{\frac{1-l}{r-l}} \operatorname{Kuma}\left(c \mid a_{i,k}, b_{i,k}\right) \mathrm{d}c \tag{6b}
134
+ $$
135
+
136
+ and therefore the complement
137
+
138
+ $$
139
+ \pi_ {i, k} ^ {(0, 1)} = 1 - \pi_ {i, k} ^ {\{0 \}} - \pi_ {i, k} ^ {\{1 \}} \tag {6c}
140
+ $$
141
+
142
+ is the probability that $f_{i,k}$ takes a continuous value in the open set $(0,1)$ . In §3.4, we will derive regularizers based on $\pi_{i,k}^{(0,1)}$ to encourage sparse outcomes to be sampled with large probability.
143
+
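+ A minimal sketch of the stretched-and-rectified Kumaraswamy sampler described above (following Bastings et al., 2019) is given below; the stretch bounds $l$ and $r$ are illustrative hyperparameters, and the tensors `a`, `b` stand for the outputs of the dense layer in Eq. (5).
+
+ ```python
+ import torch
+
+ l, r = -0.1, 1.1   # illustrative stretch bounds with l < 0 < 1 < r
+
+ def kuma_cdf(c, a, b):
+     """CDF of Kumaraswamy(a, b) on (0, 1): F(c) = 1 - (1 - c^a)^b."""
+     return 1.0 - (1.0 - c.clamp(1e-6, 1.0 - 1e-6) ** a) ** b
+
+ def sample_hardkuma(a, b):
+     """Reparameterized sample: inverse-CDF Kuma draw, stretched to (l, r), then rectified."""
+     u = torch.rand_like(a)
+     c = (1.0 - (1.0 - u) ** (1.0 / b)) ** (1.0 / a)   # Kumaraswamy sample via the inverse CDF
+     s = l + (r - l) * c                               # stretch to the support (l, r)
+     return torch.clamp(s, 0.0, 1.0)                   # rectify: exact 0s and 1s have non-zero mass
+
+ def gate_probabilities(a, b):
+     """Closed-form masses of Eq. (6): P(f = 0), P(f = 1) and P(f in (0, 1))."""
+     p0 = kuma_cdf(torch.full_like(a, -l / (r - l)), a, b)               # Eq. (6a)
+     p1 = 1.0 - kuma_cdf(torch.full_like(a, (1.0 - l) / (r - l)), a, b)  # Eq. (6b)
+     return p0, p1, 1.0 - p0 - p1                                        # Eq. (6c)
+ ```
+
+ Because clamping is the only non-smooth step and the clamp boundaries are hit with probability zero, gradients with respect to the parameters flow through the sample without the bias of straight-through estimation.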
144
+ # 3.3 PARAMETER ESTIMATION
145
+
146
+ Parameter estimation of neural network models is typically done via maximum-likelihood estimation (MLE), where we approach a local minimum of the negative log-likelihood function via stochastic gradient descent with gradient computation automated by the back-propagation algorithm. Using the following shorthand notation:
147
+
148
+ $$
149
+ \alpha \left(z _ {i}\right) \triangleq p \left(z _ {i} \mid x, y _ {< i}, z _ {< i}, f _ {< i}, \theta\right) \tag {7a}
150
+ $$
151
+
152
+ $$
153
+ \beta \left(f _ {i}\right) \triangleq \prod_ {k = 1} ^ {K} p \left(f _ {i, k} \mid x, y _ {< i}, z _ {< i}, f _ {< i}, z _ {i}, \theta\right) \tag {7b}
154
+ $$
155
+
156
+ $$
157
+ \gamma \left(y _ {i}\right) \triangleq \prod_ {j = 1} ^ {l _ {i}} p \left(y _ {i, j} \mid x, y _ {< i}, z _ {\leq i}, f _ {\leq i}, y _ {i, < j}, \theta\right). \tag {7c}
158
+ $$
159
+
160
+ The log-likelihood for a single data point can be formulated as:
161
+
162
+ $$
163
+ \log p (y | x, \theta) = \log \int \prod_ {i = 1} ^ {l} \alpha \left(z _ {i}\right) \beta \left(f _ {i}\right) \gamma \left(y _ {i}\right) \mathrm {d} z \mathrm {d} f \tag {8}
164
+ $$
165
+
166
+ the computation of which is intractable. Instead, we resort to variational inference (VI; Jordan et al., 1999), where we optimize a lower-bound on the log-likelihood
167
+
168
+ $$
169
+ \mathbb {E} _ {q (z, f | x, y, \lambda)} \left[ \sum_ {i = 1} ^ {l} \log \frac {\alpha \left(z _ {i}\right) \beta \left(f _ {i}\right) \gamma \left(y _ {i}\right)}{q (z , f | x , \lambda)} \right] \tag {9}
170
+ $$
171
+
172
+ expressed with respect to an independently parameterized posterior approximation $q(z, f|x, y, \lambda)$ . For as long as sampling from the posterior is tractable and can be performed via a reparameterization, we can rely on stochastic gradient-based optimization. In order to have a compact parameterization, we choose
173
+
174
+ $$
175
+ q (z, f | x, y, \lambda) := \prod_ {i = 1} ^ {l} \alpha \left(z _ {i}\right) \beta \left(f _ {i}\right). \tag {10}
176
+ $$
177
+
178
+ This simplifies the lower bound, which then takes the form of $l$ nested expectations, the $i$ th of which is $\mathbb{E}_{\alpha (z_i)\beta (f_i)}[\log \gamma (y_i)]$ . This is similar to the stochastic decoder of Schulz et al. (2018), though our approximate posterior is, in fact, also our parameterized prior. Although this objective does not particularly promote sparsity, we employ sparsity-inducing regularization techniques that will be discussed in the next section.
179
+
180
+ Concretely, for a given source sentence $x$ , target prefix $y_{<i}$ , and a latent sample $z_{\leq i}, f_{\leq i}$ , we obtain a single-sample estimate of the loss by computing $\mathcal{L}_i(\theta) = -\log \gamma(y_i)$ .
181
+
182
+ # 3.4 REGULARIZATION
183
+
184
+ In order to promote sparse distributions for the inflectional features, we apply a regularizer inspired by expected $L_{0}$ regularization (Louizos et al., 2018). Whereas $L_{0}$ is a penalty based on the number of non-zero outcomes, we design a penalty based on the expected number of continuous outcomes, which corresponds to $\pi_{i,k}^{(0,1)}$ as shown in Equation (6). For a given source sentence $x$ , target prefix $y_{<i}$ , and a latent sample $z_{<i}$ , $f_{<i}$ , we aggregate this penalty for each feature
185
+
186
+ $$
187
+ \mathcal {R} _ {i} (\theta) = \sum_ {k = 1} ^ {K} \pi_ {i, k} ^ {(0, 1)} \tag {11}
188
+ $$
189
+
190
+ and add it to the cost function with a positive weight $\rho$ . The final loss of the NMT model is
191
+
192
+ $$
193
+ \mathcal {L} (\theta | \mathcal {D}) = \sum_ {x, y \sim \mathcal {D}} \sum_ {i = 1} ^ {| y |} \mathcal {L} _ {i} (\theta) + \rho \mathcal {R} _ {i} (\theta). \tag {12}
194
+ $$
195
+
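+ Putting Eqs. (11) and (12) together, the contribution of a single target word to the loss could be assembled as in the sketch below; it builds on the hypothetical `gate_probabilities` helper from the §3.2 sketch, with $\rho = 0.4$ as later reported in §4.3.
+
+ ```python
+ rho = 0.4  # regularization weight from Section 4.3 (illustrative)
+
+ def word_loss(char_log_probs, a_i, b_i):
+     """Single-sample estimate: -log gamma(y_i) plus the expected-continuous-outcome penalty."""
+     nll = -char_log_probs.sum()                   # L_i(theta) = -log gamma(y_i), summed over characters
+     _, _, p_cont = gate_probabilities(a_i, b_i)   # pi_{i,k}^{(0,1)} for each feature, Eq. (6c)
+     return nll + rho * p_cont.sum()               # Eq. (12), contribution of the i-th target word
+ ```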
196
+ # 3.5 PREDICTIONS
197
+
198
+ In our model, obtaining the conditional likelihood for predicting the most likely hypothesis requires marginalisation of the latent variables, which is intractable. An alternative approach is to heuristically search through the joint distribution,
199
+
200
+ $$
201
+ \underset {y, z, f} {\arg \max } p (y, z, f | x), \tag {13}
202
+ $$
203
+
204
+ rather than the marginal, an approximation that has been referred to as Viterbi decoding (Smith, 2011). During beam search, we populate the beam with alternative target words, and for each prefix $y_{<i}$ in the beam, we resort to deterministically choosing the latent variables based on a single sample which we deem representative of their distributions, which is a common heuristic in VAEs for translation (Zhang et al., 2016; Schulz et al., 2018). For unimodal distributions, such as the Gaussian $p(z_i|x, y_{<i}, z_{<i}, f_{<i})$ , we use the analytic mean, whereas for multimodal distributions, such as the Hard Kumaraswamy $p(f_i|x, y_{<i}, z_{\leq i}, f_{<i})$ , we use the argmax.
205
+
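+ One possible reading of this decoding-time heuristic, again reusing the hypothetical `gate_probabilities` helper, is sketched below; the value 0.5 standing in for the continuous region is an assumption, since the paper does not state which representative value is used in that case.
+
+ ```python
+ import torch
+
+ def deterministic_latents(u_i, a_i, b_i):
+     """Mean of the Gaussian lemma; per-feature outcome with the largest probability mass."""
+     z_i = u_i                                        # analytic mean of the Gaussian
+     p0, p1, p_cont = gate_probabilities(a_i, b_i)
+     f_i = torch.where(p1 >= p0, torch.ones_like(p1), torch.zeros_like(p0))
+     f_i = torch.where(p_cont > torch.maximum(p0, p1), torch.full_like(p_cont, 0.5), f_i)
+     return z_i, f_i
+ ```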
206
+ # 4 EVALUATION
207
+
208
+ # 4.1 MODELS
209
+
210
+ We evaluate our model by comparing it in machine translation against three baselines which constitute the conventional open-vocabulary NMT methods: architectures using atomic parameterization with either subword units segmented with BPE (Sennrich et al., 2016) or characters, and the hierarchical parameterization method employed for generating all words in the output. We implement all architectures using PyTorch (Paszke et al., 2017) within the OpenNMT-py framework (Klein et al., 2017) $^{8}$ .
211
+
212
+ # 4.2 DATA AND LANGUAGES
213
+
214
+ In order to evaluate our model, we design two sets of experiments. The experiments in §4.4.1 aim to evaluate different methods under low-resource settings, for languages with different morphological typology. We model the machine translation task from English into three languages with distinct morphological characteristics: Arabic (templatic), Czech (fusional), and Turkish (agglutinative). We use the TED Talks corpora (Cettolo, 2012) for training the NMT models for these experiments. In §4.4.3, we conduct more experiments in Turkish to demonstrate the case of increased data sparsity using multi-domain training corpora, where we extend the training set using corpora from EU Bookshop (Skadiņš et al., 2014), Global Voices, Gnome, Tatoeba, Ubuntu (Tiedemann, 2012), KDE4 (Tiedemann, 2009), Open Subtitles (Lison & Tiedemann, 2016) and SETIMES (Tyers & Alperen, 2010) $^{9}$ . The statistical characteristics of the training sets are given in Tables 4 and 5. We use the official evaluation sets of the IWSLT $^{10}$ for validating and testing the accuracy of the models. In order to increase the number of unknown and rare words in the evaluation sets we measure accuracy on large test sets combining evaluation sets from many years (Table 6 presents the evaluation sets used for development and testing). The accuracy of each model output is measured using BLEU (Papineni et al., 2002) and chrF3 (Popović, 2015) metrics, whereas the significance of the improvements is computed using bootstrap hypothesis testing (Clark et al., 2011). In order to measure the accuracy in predicting the correct syntactic description of the references, we also compute BLEU
215
+
216
+ scores over the output sentences segmented using a morphological analyzer. We use AlKhalil Morpho Sys (Boudchiche et al., 2017) for segmenting Arabic, MorphoDiTa (Straková et al., 2014) for segmenting Czech, and the morphological lexicon of Oflazer (Oflazer & Kuruöz, 1994) together with the disambiguation tool of Sak (Sak et al., 2007) for segmenting Turkish sentences into sequences of lemmas and morphological features.
217
+
218
+ # 4.3 TRAINING SETTINGS
219
+
220
+ All models are implemented using gated recurrent units (GRU) (Cho et al., 2014), and have a single-layer bi-RNN encoder. The source sides of the data used for training all NMT models, and the target sides of the data used in training the subword-level NMT models, are segmented using BPE with 16,000 merge rules. We implement all decoders using a comparable number of GRU parameters, including 3-layer stacked-GRU subword and character-level decoders, where the attention is computed after the $1^{st}$ layer (Barone et al., 2017), and a 3-layer hierarchical decoder which implements the attention mechanism after the $2^{nd}$ layer. All models use an embedding dimension and GRU size of 512. LMM uses the same hierarchical GRU architecture, where the middle layer is augmented using 4 multi-layer perceptrons with 256 hidden units. We use a lemma vector dimension of 150, 10 inflectional features (see §A.3 for experiments conducted to tune the feature dimensions) and set the regularization constant to $\rho = 0.4$ . All models are trained using the Adam optimizer (Kingma & Ba, 2014) with a batch size of 100, dropout rate of 0.2, learning rate of 0.0004 and learning rate decay of 0.8, applied when the perplexity does not decrease at a given epoch.[11] Translations are generated with beam search with a beam size of 5, where the hierarchical models implement the hierarchical beam search algorithm (Ataman et al., 2019).
221
+
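+ For quick reference, the settings listed above can be summarized as follows; this is only a restatement of the reported values in plain Python, not an actual OpenNMT-py configuration file.
+
+ ```python
+ # Summary of the training settings reported in Section 4.3 (descriptive keys, not OpenNMT-py options).
+ train_config = {
+     "encoder": "1-layer bi-GRU",
+     "decoder": "3-layer GRU (subword, character, hierarchical, or LMM variant)",
+     "embedding_and_gru_size": 512,
+     "source_bpe_merge_rules": 16000,
+     "lemma_vector_dim": 150,
+     "inflectional_features": 10,
+     "regularization_rho": 0.4,
+     "optimizer": "Adam",
+     "batch_size": 100,
+     "dropout": 0.2,
+     "learning_rate": 0.0004,
+     "learning_rate_decay": 0.8,
+     "beam_size": 5,
+ }
+ ```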
222
+ # 4.4 RESULTS
223
+
224
+ # 4.4.1 THE EFFECT OF MORPHOLOGICAL TYPOLOGY
225
+
226
+ The experiment results given in Table 1 show the performance of each model in translating English into Arabic, Czech and Turkish. In Turkish, the sparsest target language in our benchmark with rich agglutinative morphology, character-based decoding proves more advantageous than the subword-level and hierarchical models, suggesting that increased granularity in the vocabulary units might aid in learning more accurate representations under conditions of high data sparsity. In Arabic, on the other hand, the hierarchical decoding model proves advantageous compared to the subword and character-level models, as it might be useful in better learning syntactic dependencies. LMM obtains improvements of 0.51 and 0.30 BLEU points in Arabic and Turkish over the best performing baselines, respectively. The fact that our model can efficiently work in both Arabic and Turkish confirms that it can handle the generation of both concatenative and non-concatenative morphological transformations. The results in the English-to-Czech translation direction do not indicate a specific advantage of using either method for generating fusional morphology, where morphemes are already optimized at the surface level, although our model is still able to achieve translation accuracy comparable to the character and subword-level models.
227
+
228
+ # 4.4.2 PREDICTING UNSEEN WORDS
229
+
230
+ In addition to the general machine translation evaluation using automatic metrics, we perform a more focused statistical analysis to illustrate the performance of different methods in predicting unseen words by computing the average perplexity per character on the input sentences which contain out-of-vocabulary (OOV) words as suggested by Cotterell et al. (2018). We also analyze the outputs generated by each decoder in terms of the frequency of unknown words in each model output and the Kullback-Leibler (KL) divergence between the character trigram distributions of the references and outputs, which represents the coherence between the statistical distribution learned by each model and the reference translations.
231
+
232
+ Our analysis results generally confirm the advantage of increased granularity during the generation of unseen words, where the character-level decoder can generate a higher rate of unseen word forms and higher KL-divergence with the reference, suggesting superior ability in generalizing to new
233
+
234
+ (only in-domain)
+
+ | Model | AR BLEU | AR t-BLEU | AR chrF3 | CS BLEU | CS t-BLEU | CS chrF3 | TR BLEU | TR t-BLEU | TR chrF3 |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | Subwords | 14.27 | 51.24 | 0.3927 | 16.60 | **54.22** | **0.4123** | 8.52 | 38.03 | 0.3763 |
+ | Char.s | 12.72 | 47.56 | 0.3804 | 16.94 | 52.80 | 0.4103 | 10.63 | 40.63 | 0.3810 |
+ | Hierarch. | 15.55 | 54.01 | 0.4154 | 16.79 | 48.27 | 0.4068 | 9.74 | 35.91 | 0.3771 |
+ | LMM | **16.06** | **55.97** | **0.4251** | **16.97** | 50.35 | 0.4095 | **10.93** | **45.47** | **0.3889** |
235
+
236
+ (multi-domain)
+
+ | Model | TR BLEU | TR t-BLEU | TR chrF3 |
+ | --- | --- | --- | --- |
+ | Subwords | 10.42 | 42.65 | 0.3722 |
+ | Char.s | 8.94 | 37.12 | 0.3274 |
+ | Hierarch. | 10.35 | 40.54 | 0.3870 |
+ | LMM | **11.48** | **48.23** | **0.3939** |
237
+
238
+ output and not necessarily copying previous observations as the subword-level model does; this advantage, however, is more visible in Turkish and less so in Czech or Arabic. The hierarchical decoder, which performs the search at the level of words, on the other hand, behaves with less uncertainty in terms of the perplexity values, although it does not demonstrate the ability to generalize to new forms, nor can it closely capture the actual distribution in the target language.
239
+
240
+ Due to its stochastic nature, our model yields higher perplexity values compared to the hierarchical model, while its values range between those of the subword and character-based models, possibly finding an optimal level of granularity between the two solutions. The KL-divergence and OOV rates confirm that our model has the potential to generalize better to new word forms as well as to different morphological typologies.
241
+
242
+ Table 1: Above: Machine translation accuracy in Arabic (AR), Czech (CS) and Turkish (TR) in terms of BLEU and ChrF3 metrics as well as BLEU scores computed on the output sentences tagged with the morphological analyzer (t-BLEU) using in-domain training data. Below: The performance of models trained with multi-domain data. Best scores are in bold. All improvements over the baselines are statistically significant (p-value $< 0.05$ ).
243
+
244
+ | Model | AR OOV% | AR Ppl | AR KL-Div | CS OOV% | CS Ppl | CS KL-Div | TR OOV% | TR Ppl | TR KL-Div |
+ | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
+ | Subwords | 1.75 | 2.84 | 12,871 | 2.39 | 2.62 | 8,954 | 3.54 | 2.78 | 17,342 |
+ | Char.s | 3.08 | 2.46 | 29,607 | 1.90 | 2.61 | 17,092 | 4.28 | 2.38 | 38,043 |
+ | Hierarch. | 1.96 | 2.59 | 15,064 | 0.87 | 2.65 | 29,022 | 1.53 | 2.46 | 68,743 |
+ | LMM | 3.78 | 2.68 | 9,892 | 2.4 | 2.71 | 14,296 | 4.89 | 2.59 | 38,930 |
245
+
246
+ Table 2: Percentage of out-of-vocabulary (OOV) words in the output, normalized perplexity per character (Ppl) and the KL divergence between the references and the outputs of systems trained with in-domain data for different language directions.
247
+
248
+ # 4.4.3 THE EFFECT OF DATA SIZE
249
+
250
+ Repeating the experiments in the English-to-Turkish translation direction by increasing the amount of training data with multi-domain corpora demonstrates a more challenging case, where there is a greater possibility of observing new words in varying contexts, either in the form of morphological inflections due to previously unobserved syntactic conditions, or a larger vocabulary extended with terminology from different domains. In this experiment, the character-level model experiences a drop in performance and its accuracy is much lower than the subword-level one, suggesting that its capacity cannot cope with the increased amount of training data. Empirical results suggest that with increased capacity, character-level models carry the potential to reach comparable performance to subword-level models (Cherry et al., 2018). On the other hand, our model reaches a much larger improvement of 0.82 BLEU points over the subword-level and 2.54 BLEU points over the character-level decoders, suggesting that it could make use of the increased amount of observations to improve translation performance, which possibly aids the morphology model in becoming more accurate.
251
+
252
+ # 4.4.4 THE IMPACT OF INFLECTIONAL FEATURES
253
+
254
+ In order to understand whether the latent inflectional features in fact capture information about variations related to morphological transformations, we first try generating different surface forms of the same lemma by sampling a lemma vector with LMM for the input word 'go' and generating outputs using the fixed lemma vector and assigning different values to the inflectional features. In the second experiment, we assess the impact of the inflectional features by setting all features $f$ to 0 and translating a set of English sentences with varying inflected forms in Turkish. Table 3 presents different sets of feature values and the corresponding outputs generated by the decoder and the outputs generated with or without the inflectional component.
255
+
256
+ | Features | Output | English Translation |
+ | --- | --- | --- |
+ | [1,1,1,1,1,1,1,1,1] | git | go (informal) |
+ | [0,1,1,1,1,1,1,1,1] | ‘a git | to go |
+ | [0,1,0,1,1,1,1,1,1,1] | ‘da git | at go |
+ | [0,0,0,1,1,0,0,1,1,0] | gidin | go (formal) |
+ | [1,1,0,0,0,0,1,0,1,1] | gitmek | to go (infinitive) |
+ | [0,0,1,0,0,0,0,0,0,1] | gidiyor | (he/she/it is) going |
+ | [0,0,0,0,0,0,0,0,1,0] | gidip | by going (gerund) |
+ | [0,0,1,1,0,0,1,0,1,0] | gidiyoruz | (we are) going |
+
+ | Input | Output with f | Output without f |
+ | --- | --- | --- |
+ | he went home. | eve gitti. | eve gitti. |
+ | he came from home. | evden geldi. | eve geldi. |
+ | it is good to be home. | evde olmak iyi. | evde olmak iyi. |
+ | his home has red walls. | evinde kırkızı duvarlar var. | evde kırkızı duvar var. |
257
+
258
+ Table 3: Above: Outputs of LMM based on the lemma 'git' ('go') and different sets of inflectional features. Below: Examples of predicting inflections in context with or without using features.
259
+
260
+ The model generates different surface forms for different sets of features, confirming that the latent variables represent morphological features related to the infinitive form of the verb, as well as its formality conditions, prepositions, person, number and tense. When decoding the set of sentences given in the second experiment, LMM always generates the correct inflected form, although when the feature values are set to 0 the model omits some inflections in the output, suggesting that, despite partially relying on the source-side context, it still encodes important information for generating correct surface forms in the inflectional features.
261
+
262
+ # 5 CONCLUSION
263
+
264
+ In this paper we presented a novel decoding architecture for NMT employing a hierarchical latent variable model to promote sparsity in lexical representations, which shows promise for morphologically-rich and low-resource languages. Our model generates words one character at a time by composing two latent representations of their lemmas and inflectional features. We evaluate our model against conventional open-vocabulary NMT solutions, such as subword and character-level decoding methods, in translating English into three morphologically-rich languages with different morphological typologies under low to mid-resource settings. Our results show that our model can significantly outperform subword-level NMT models, while demonstrating better capacity than character-level models in coping with increased data sparsity. We also conduct ablation studies on the impact of feature variations on the predictions, which show that, despite being completely unsupervised, our model can in fact learn morphosyntactic information and make use of it to generalize to different surface forms of words.
265
+
266
+ # 6 ACKNOWLEDGMENTS
267
+
268
+ The authors would like to thank Marcello Federico, Orhan Firat, Adam Lopez, Graham Neubig, Akash Srivastava and Clara Vania for their feedback and suggestions. This project received funding from the European Union's Horizon 2020 research and innovation programme under grant agreements 825299 (GoURMET) and 688139 (SUMMA).
269
+
270
+ # REFERENCES
271
+
272
+ Duygu Ataman, Matteo Negri, Marco Turchi, and Marcello Federico. Linguistically-motivated vocabulary reduction for neural machine translation from Turkish to English. The Prague Bulletin of Mathematical Linguistics, 108(1):331-342, 2017.
273
+ Duygu Ataman, Orhan Firat, Mattia A Di Gangi, Marcello Federico, and Alexandra Birch. On the importance of word boundaries in character-level neural machine translation. arXiv preprint arXiv:1910.06753, 2019.
274
+ Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473, 2014.
275
+ Antonio Valerio Miceli Barone, Jindrich Helcl, Rico Sennrich, Barry Haddow, and Alexandra Birch. Deep architectures for neural machine translation. In Proceedings of the Second Conference on Machine Translation, pp. 99-107, 2017.
276
+ Joost Bastings, Wilker Aziz, and Ivan Titov. Interpretable neural predictions with differentiable binary variables. Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2963-2973, 2019.
277
+ Yoshua Bengio, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013.
278
+ Léon Bottou and Yann L. Cun. Large scale online learning. In S. Thrun, L. K. Saul, and B. Schölkopf (eds.), Advances in Neural Information Processing Systems 16, pp. 217-224. MIT Press, 2004.
279
+ Mohamed Boudchiche, Azzeddine Mazroui, Mohamed Ould Abdallahi Ould Bebah, Abdelhak Lakhouaja, and Abderrahim Boudlal. Alkhalil morpho sys 2: A robust arabic morpho-syntactic analyzer. Journal of King Saud University-Computer and Information Sciences, 29(2):141-146, 2017.
280
+ Mauro Cettolo. Wit3: Web inventory of transcribed and translated talks. In Conference of European Association for Machine Translation, pp. 261-268, 2012.
281
+ Colin Cherry, George Foster, Ankur Bapna, Orhan Firat, and Wolfgang Macherey. Revisiting character-based neural machine translation with capacity and compression. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 4295-4305, 2018.
282
+ Kyunghyun Cho, Bart van Merrienboer, Dzmitry Bahdanau, and Yoshua Bengio. On the properties of neural machine translation: Encoder-decoder approaches. In Proceedings of 8th Workshop on Syntax, Semantics and Structure in Statistical Translation (SSST), pp. 103-111, 2014.
283
+ Jonathan H Clark, Chris Dyer, Alon Lavie, and Noah A Smith. Better hypothesis testing for statistical machine translation: Controlling for optimizer instability. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 176-181. Association for Computational Linguistics, 2011.
284
+ Ryan Cotterell, Sebastian J. Mielke, Jason Eisner, and Brian Roark. Are all languages equally hard to language-model? In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pp. 536-541, 2018.
285
+ Hoang Cuong and Khalil Simaan. Latent domain translation models in mix-of-domains haystack. In Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics, pp. 1928-1939, 2014.
286
+ Anirudh Goyal Alias Parth Goyal, Alessandro Sordoni, Marc-Alexandre Côté, Nan Rosemary Ke, and Yoshua Bengio. Z-forcing: Training stochastic recurrent networks. In Advances in neural information processing systems, pp. 6713-6723, 2017.
287
+ Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-Softmax. International Conference on Learning Representations, 2017.
288
+
289
+ Michael I. Jordan, Zoubin Ghahramani, Tommi S. Jaakkola, and Lawrence K. Saul. An introduction to variational methods for graphical models. Machine Learning, 37(2):183-233, 1999.
290
+ Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
291
+ Diederik P. Kingma and Max Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.
292
+ Guillaume Klein, Yoon Kim, Yuntian Deng, Jean Senellart, and Alexander Rush. OpenNMT: Open-source toolkit for neural machine translation. Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, System Demonstrations, pp. 67-72, 2017.
293
+ Julia Kreutzer and Artem Sokolov. Learning to segment inputs for nmt favors character-level processing. In Proceedings of the 15th International Workshop on Spoken Language Translation, pp. 166-172, 2018.
294
+ Ponnambalam Kumaraswamy. A generalized probability density function for double-bounded random processes. Journal of Hydrology, 46(1-2):79-88, 1980.
295
+ Wang Ling, Isabel Trancoso, Chris Dyer, and Alan W. Black. Character-based neural machine translation. arXiv preprint arXiv:1511.04586, 2015.
296
+ Pierre Lison and Jörg Tiedemann. Opensubtitles2016: Extracting large parallel corpora from movie and TV subtitles. 2016.
297
+ Christos Louizos, Max Welling, and Diederik P Kingma. Learning sparse neural networks through $\mathrm{L}_0$ regularization. arXiv preprint arXiv:1712.01312, 2018.
298
+ Minh-Thang Luong and Christopher D. Manning. Achieving open vocabulary neural machine translation with hybrid word-character models. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1054-1063, 2016.
299
+ Minh-Thang Luong, Hieu Pham, and Christopher D Manning. Effective approaches to attention-based neural machine translation. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pp. 1412-1421, 2015.
300
+ Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. International Conference on Learning Representations, 2017.
301
+ Eric Nalisnick and Padhraic Smyth. Stick-breaking variational autoencoders. arXiv preprint arXiv:1605.06197, 2016.
302
+ Kemal Oflazer and Ilker Kuruöz. Tagging and morphological disambiguation of turkish text. In Proceedings of the fourth conference on Applied natural language processing, pp. 144-149. Association for Computational Linguistics, 1994.
303
+ Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. BLEU: a Method for Automatic Evaluation of Machine Translation. In Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics, pp. 311-318, 2002.
304
+ Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in Pytorch. NeurIPS Autodiff Workshop, 2017.
305
+ Maja Popović. chrF: character n-gram F-score for automatic MT evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pp. 392–395, 2015.
306
+ Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. Stochastic backpropagation and approximate inference in deep generative models. In Proceedings of the 31st International Conference on Machine Learning, volume 32 of Proceedings of Machine Learning Research, pp. 1278-1286, 2014.
307
+
308
+ Herbert Robbins and Sutton Monro. A stochastic approximation method. The Annals of Mathematical Statistics, 22(3):400-407, 1951.
309
+ Gozde Gul Sahin and Mark Steedman. Character-level models versus morphology in semantic role labeling. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 386-396, 2018.
310
+ Haşim Sak, Tunga Güngör, and Murat Saraçlar. Morphological disambiguation of Turkish text with perceptron algorithm. In International Conference on Intelligent Text Processing and Computational Linguistics, pp. 107-118. Springer, 2007.
311
+ Philip Schulz, Wilker Aziz, and Trevor Cohn. A stochastic decoder for neural machine translation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1243-1252, 2018.
312
+ Rico Sennrich. How grammatical is character-level neural machine translation? Assessing MT quality with contrastive translation pairs. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics (Volume 2, Short Papers), pp. 376-382, 2017.
313
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 1715-1725, 2016.
314
+ Raivis Skadiņš, Jörg Tiedemann, Roberts Rozis, and Daiga Deksne. Billions of parallel words for free: Building and using the EU Bookshop corpus. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC), pp. 1850-1855, 2014.
315
+ Noah A Smith. Linguistic structure prediction. Synthesis lectures on human language technologies, 4(2):1-274, 2011.
316
+ Jana Straková, Milan Straka, and Jan Hajic. Open-Source Tools for Morphology, Lemmatization, POS Tagging and Named Entity Recognition. In Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pp. 13-18. Association for Computational Linguistics, 2014.
317
+ Jörg Tiedemann. News from OPUS - a collection of multilingual parallel corpora with tools and interfaces. In Recent Advances in Natural Language Processing, volume 5, pp. 237-248, 2009.
318
+ Jörg Tiedemann. Parallel data, tools and interfaces in OPUS. In Proceedings of the Seventh International Conference on Language Resources and Evaluation (LREC), pp. 2214-2218, 2012.
319
+ Francis M. Tyers and Murat Serdar Alperen. South-East European Times: A parallel corpus of Balkan languages. In Proceedings of the LREC Workshop on Exploitation of Multilingual Resources and Tools for Central and (South-) Eastern European Languages, pp. 49-53, 2010.
320
+ Clara Vania and Adam Lopez. From characters to words to in between: Do we capture morphology? In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 2016-2027, 2017.
321
+ Ronald J Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine learning, 8(3-4):229-256, 1992.
322
+ Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144, 2016.
323
+ Biao Zhang, Deyi Xiong, Jinsong Su, Hong Duan, and Min Zhang. Variational neural machine translation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 521-530, 2016.
324
+ Chunting Zhou and Graham Neubig. Multi-space variational encoder-decoders for semi-supervised labeled sequence transduction. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 310-320, 2017.
325
+
326
+ # A APPENDIX
327
+
328
+ A.1 THE STATISTICAL CHARACTERISTICS OF EXPERIMENTAL DATA
329
+
330
+ <table><tr><td rowspan="2">Language Pair</td><td rowspan="2"># sentences</td><td colspan="2"># tokens</td><td colspan="2"># types</td></tr><tr><td>Source</td><td>Target</td><td>Source</td><td>Target</td></tr><tr><td>English-Arabic</td><td>238K</td><td>5M</td><td>4M</td><td>120K</td><td>220K</td></tr><tr><td>English-Czech</td><td>118K</td><td>2M</td><td>2M</td><td>50K</td><td>118K</td></tr><tr><td>English-Turkish</td><td>136K</td><td>2M</td><td>3M</td><td>53K</td><td>171K</td></tr></table>
331
+
332
+ Table 4: Training sets based on the TED Talks corpora (M: Million, K: Thousand).
333
+
334
+ <table><tr><td rowspan="2">Language Pair</td><td rowspan="2"># sentences</td><td colspan="2"># tokens</td><td colspan="2"># types</td></tr><tr><td>Source</td><td>Target</td><td>Source</td><td>Target</td></tr><tr><td>English-Turkish</td><td>434K</td><td>8M</td><td>6M</td><td>135K</td><td>373K</td></tr></table>
335
+
336
+ Table 5: The multi-domain training set (M: Million, K: Thousand).
337
+
338
+ <table><tr><td>Language</td><td colspan="2">Data sets</td><td># sentences</td></tr><tr><td rowspan="3">English-Arabic</td><td>Development</td><td>dev2010, test2010</td><td>6K</td></tr><tr><td rowspan="2">Testing</td><td>test2011, test2012</td><td rowspan="2">4K</td></tr><tr><td>test2013, test2014</td></tr><tr><td rowspan="3">English-Czech</td><td>Development</td><td>dev2010, test2010,</td><td>3K</td></tr><tr><td rowspan="2">Testing</td><td>test2011</td><td rowspan="2">3K</td></tr><tr><td>test2012, test2013</td></tr><tr><td rowspan="2">English-Turkish</td><td>Development</td><td>dev2010, test2010</td><td>3K</td></tr><tr><td>Testing</td><td>test2011, test2012</td><td>3K</td></tr></table>
339
+
340
+ Table 6: Development and testing sets (K: Thousand).
341
+
342
+ # A.2 THE KUMARASWAMY DISTRIBUTION
343
+
344
+ ![](images/581355b878caa4c7b22c96e6f10c54da39032ea97d7fea880de0ac9c9a2ccd4d.jpg)
345
+ Figure 2: The top row shows the density function of the continuous base distribution over $(0,1)$ . The middle row shows the result of stretching it to include 0 and 1 in its support. The bottom row shows the result of rectification: probability mass over $(l,0)$ collapses to 0 and probability mass over $(1,r)$ collapses to 1, which gives sparse outcomes non-zero mass. Varying the shape parameters $(a,b)$ of the underlying continuous distribution changes how much of the stretched density's mass falls outside $(0,1)$ , and hence the probability of sampling sparse outcomes.
346
+
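+ As a concrete illustration of the stretch-and-rectify construction above, the following minimal sketch samples from a stretched and rectified Kumaraswamy distribution. It is not the paper's implementation: the shape parameters and the stretch limits $l$ and $r$ below are illustrative choices, and sampling is done by inverting the Kumaraswamy CDF $F(x) = 1 - (1 - x^{a})^{b}$ .

```python
import numpy as np

def sample_rectified_kumaraswamy(a, b, l=-0.1, r=1.1, size=1, rng=None):
    """Sample from a stretched-and-rectified Kumaraswamy(a, b) distribution.

    1. Draw u ~ Uniform(0, 1) and invert the Kumaraswamy CDF:
       k = (1 - (1 - u)^(1/b))^(1/a), which lies in (0, 1).
    2. Stretch k to the interval (l, r) with l < 0 and r > 1.
    3. Rectify (clip) to [0, 1]: mass that fell in (l, 0) collapses to 0
       and mass in (1, r) collapses to 1, so sparse outcomes get
       non-zero probability.
    """
    rng = np.random.default_rng() if rng is None else rng
    u = rng.uniform(size=size)
    k = (1.0 - (1.0 - u) ** (1.0 / b)) ** (1.0 / a)   # Kumaraswamy sample in (0, 1)
    stretched = l + (r - l) * k                        # now in (l, r)
    return np.clip(stretched, 0.0, 1.0)                # rectified to [0, 1]

# Example: smaller shape parameters push more mass toward the boundaries.
samples = sample_rectified_kumaraswamy(a=0.5, b=0.5, size=10_000)
print("P(z = 0) ~", np.mean(samples == 0.0), " P(z = 1) ~", np.mean(samples == 1.0))
```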
347
+ # A.3 THE EFFECT OF FEATURE DIMENSIONS
348
+
349
+ We investigate the optimal lemma and inflectional feature sizes by measuring the accuracy of English-to-Turkish translation with different feature vector dimensions. The results in Figure 3 show that gradually compressing the word representations computed by the recurrent hidden states (originally 512-dimensional) from 500 down to 100 dimensions increases output accuracy, suggesting that more compact representations may give the model better generalization capability. Our results also show that a feature dimension of 10 is sufficient to reach the best accuracy.
350
+
351
+ ![](images/24aa99ee8b4171825d54a0b650e1e58336ee980ab5adf8d2441e3a67347c39ce.jpg)
352
+ Figure 3: The effect of feature dimensions on translation accuracy in Turkish.
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c40bc621b14cdb716e5f71583448624dab3b139d60219d413e568af42f4b4151
3
+ size 420283
alatentmorphologymodelforopenvocabularyneuralmachinetranslation/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4b8c9cd23d399a228c09f63fbb0eaa671c25f7112a5773b8f5993d40c94dcfb4
3
+ size 467664
albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23a426c58169ab3b0bbe6f6bef325f8d18ac52bdb893674b44e1616390dbd61d
3
+ size 104286
albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e75d142735f50df5748ef4dcebb3750df2158dd1880645ab92664d965c38f172
3
+ size 125037
albertalitebertforselfsupervisedlearningoflanguagerepresentations/ca492a7d-65ed-4555-8eed-cbaf0838bc02_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:46d207853a7df43a1f39d9da3836360837124970abb409149857952f59a130c6
3
+ size 350107
albertalitebertforselfsupervisedlearningoflanguagerepresentations/full.md ADDED
@@ -0,0 +1,353 @@
 
 
 
 
1
+ # ALBERT: A LITE BERT FOR SELF-SUPERVISED LEARNING OF LANGUAGE REPRESENTATIONS
2
+
3
+ Zhenzhong Lan $^{1}$ Mingda Chen $^{2*}$ Sebastian Goodman $^{1}$ Kevin Gimpel $^{2}$
4
+
5
+ Piyush Sharma<sup>1</sup> Radu Soricut<sup>1</sup>
6
+
7
+ $^{1}$ Google Research $^{2}$ Toyota Technological Institute at Chicago
8
+
9
+ {lanzhzh, seabass, piyushsharma, rsoricut}@google.com {mchen, kgimpel}@ttic.edu
10
+
11
+ # ABSTRACT
12
+
13
+ Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations and longer training times. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT (Devlin et al., 2019). Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large. The code and the pretrained models are available at https://github.com/google-research/ALBERT.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Full network pre-training (Dai & Le, 2015; Radford et al., 2018; Devlin et al., 2019; Howard & Ruder, 2018) has led to a series of breakthroughs in language representation learning. Many nontrivial NLP tasks, including those that have limited training data, have greatly benefited from these pre-trained models. One of the most compelling signs of these breakthroughs is the evolution of machine performance on a reading comprehension task designed for middle and high-school English exams in China, the RACE test (Lai et al., 2017): the paper that originally describes the task and formulates the modeling challenge reports then state-of-the-art machine accuracy at $44.1\%$ ; the latest published result reports their model performance at $83.2\%$ (Liu et al., 2019); the work we present here pushes it even higher to $89.4\%$ , a stunning $45.3\%$ improvement that is mainly attributable to our current ability to build high-performance pretrained language representations.
18
+
19
+ Evidence from these improvements reveals that a large network is of crucial importance for achieving state-of-the-art performance (Devlin et al., 2019; Radford et al., 2019). It has become common practice to pre-train large models and distill them down to smaller ones (Sun et al., 2019; Turc et al., 2019) for real applications. Given the importance of model size, we ask: Is having better NLP models as easy as having larger models?
20
+
21
+ An obstacle to answering this question is the memory limitations of available hardware. Given that current state-of-the-art models often have hundreds of millions or even billions of parameters, it is easy to hit these limitations as we try to scale our models. Training speed can also be significantly hampered in distributed training, as the communication overhead is directly proportional to the number of parameters in the model.
22
+
23
+ Existing solutions to the aforementioned problems include model parallelization (Shazeer et al., 2018; Shoeybi et al., 2019) and clever memory management (Chen et al., 2016; Gomez et al., 2017).
24
+
25
+ These solutions address the memory limitation problem, but not the communication overhead. In this paper, we address all of the aforementioned problems by designing A Lite BERT (ALBERT) architecture that has significantly fewer parameters than a traditional BERT architecture.
26
+
27
+ ALBERT incorporates two parameter reduction techniques that lift the major obstacles in scaling pre-trained models. The first one is a factorized embedding parameterization. By decomposing the large vocabulary embedding matrix into two small matrices, we separate the size of the hidden layers from the size of vocabulary embedding. This separation makes it easier to grow the hidden size without significantly increasing the parameter size of the vocabulary embeddings. The second technique is cross-layer parameter sharing. This technique prevents the number of parameters from growing with the depth of the network. Both techniques significantly reduce the number of parameters for BERT without seriously hurting performance, thus improving parameter-efficiency. An ALBERT configuration similar to BERT-large has 18x fewer parameters and can be trained about 1.7x faster. The parameter reduction techniques also act as a form of regularization that stabilizes the training and helps with generalization.
28
+
29
+ To further improve the performance of ALBERT, we also introduce a self-supervised loss for sentence-order prediction (SOP). SOP primarily focuses on inter-sentence coherence and is designed to address the ineffectiveness (Yang et al., 2019; Liu et al., 2019) of the next sentence prediction (NSP) loss proposed in the original BERT.
30
+
31
+ As a result of these design decisions, we are able to scale up to much larger ALBERT configurations that still have fewer parameters than BERT-large but achieve significantly better performance. We establish new state-of-the-art results on the well-known GLUE, SQuAD, and RACE benchmarks for natural language understanding. Specifically, we push the RACE accuracy to $89.4\%$ , the GLUE benchmark to 89.4, and the F1 score of SQuAD 2.0 to 92.2.
32
+
33
+ # 2 RELATED WORK
34
+
35
+ # 2.1 SCALING UP REPRESENTATION LEARNING FOR NATURAL LANGUAGE
36
+
37
+ Learning representations of natural language has been shown to be useful for a wide range of NLP tasks and has been widely adopted (Mikolov et al., 2013; Le & Mikolov, 2014; Dai & Le, 2015; Peters et al., 2018; Devlin et al., 2019; Radford et al., 2018; 2019). One of the most significant changes in the last two years is the shift from pre-training word embeddings, whether standard (Mikolov et al., 2013; Pennington et al., 2014) or contextualized (McCann et al., 2017; Peters et al., 2018), to full-network pre-training followed by task-specific fine-tuning (Dai & Le, 2015; Radford et al., 2018; Devlin et al., 2019). In this line of work, it is often shown that larger model size improves performance. For example, Devlin et al. (2019) show that across three selected natural language understanding tasks, using larger hidden size, more hidden layers, and more attention heads always leads to better performance. However, they stop at a hidden size of 1024, presumably because of the model size and computation cost problems.
38
+
39
+ It is difficult to experiment with large models due to computational constraints, especially in terms of GPU/TPU memory limitations. Given that current state-of-the-art models often have hundreds of millions or even billions of parameters, we can easily hit memory limits. To address this issue, Chen et al. (2016) propose a method called gradient checkpointing to reduce the memory requirement to be sublinear at the cost of an extra forward pass. Gomez et al. (2017) propose a way to reconstruct each layer's activations from the next layer so that they do not need to store the intermediate activations. Both methods reduce the memory consumption at the cost of speed. Raffel et al. (2019) proposed to use model parallelization to train a giant model. In contrast, our parameter-reduction techniques reduce memory consumption and increase training speed.
40
+
41
+ # 2.2 CROSS-LAYER PARAMETER SHARING
42
+
43
+ The idea of sharing parameters across layers has been previously explored with the Transformer architecture (Vaswani et al., 2017), but this prior work has focused on training for standard encoder-decoder tasks rather than the pretraining/finetuning setting. Different from our observations, Dehghani et al. (2018) show that networks with cross-layer parameter sharing (Universal Transformer, UT) get better performance on language modeling and subject-verb agreement than the standard
44
+
45
+ transformer. Very recently, Bai et al. (2019) propose a Deep Equilibrium Model (DQE) for transformer networks and show that DQE can reach an equilibrium point for which the input embedding and the output embedding of a certain layer stay the same. Our observations show that our embeddings are oscillating rather than converging. Hao et al. (2019) combine a parameter-sharing transformer with the standard one, which further increases the number of parameters of the standard transformer.
46
+
47
+ # 2.3 SENTENCE ORDERING OBJECTIVES
48
+
49
+ ALBERT uses a pretraining loss based on predicting the ordering of two consecutive segments of text. Several researchers have experimented with pretraining objectives that similarly relate to discourse coherence. Coherence and cohesion in discourse have been widely studied and many phenomena have been identified that connect neighboring text segments (Hobbs, 1979; Halliday & Hasan, 1976; Grosz et al., 1995). Most objectives found effective in practice are quite simple. Skipthought (Kiros et al., 2015) and FastSent (Hill et al., 2016) sentence embeddings are learned by using an encoding of a sentence to predict words in neighboring sentences. Other objectives for sentence embedding learning include predicting future sentences rather than only neighbors (Gan et al., 2017) and predicting explicit discourse markers (Jernite et al., 2017; Nie et al., 2019). Our loss is most similar to the sentence ordering objective of Jernite et al. (2017), where sentence embeddings are learned in order to determine the ordering of two consecutive sentences. Unlike most of the above work, however, our loss is defined on textual segments rather than sentences. BERT (Devlin et al., 2019) uses a loss based on predicting whether the second segment in a pair has been swapped with a segment from another document. We compare to this loss in our experiments and find that sentence ordering is a more challenging pretraining task and more useful for certain downstream tasks. Concurrently to our work, Wang et al. (2019) also try to predict the order of two consecutive segments of text, but they combine it with the original next sentence prediction in a three-way classification task rather than empirically comparing the two.
50
+
51
+ # 3 THE ELEMENTS OF ALBERT
52
+
53
+ In this section, we present the design decisions for ALBERT and provide quantified comparisons against corresponding configurations of the original BERT architecture (Devlin et al., 2019).
54
+
55
+ # 3.1 MODEL ARCHITECTURE CHOICES
56
+
57
+ The backbone of the ALBERT architecture is similar to BERT in that it uses a transformer encoder (Vaswani et al., 2017) with GELU nonlinearities (Hendrycks & Gimpel, 2016). We follow the BERT notation conventions and denote the vocabulary embedding size as $E$ , the number of encoder layers as $L$ , and the hidden size as $H$ . Following Devlin et al. (2019), we set the feed-forward/filter size to be $4H$ and the number of attention heads to be $H / 64$ .
58
+
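+ As a quick illustration of these conventions, the sketch below derives the feed-forward size and number of attention heads implied by the hidden sizes listed in Table 1. It simply applies the stated rules (filter size $4H$ , heads $H/64$ ) and is not code from the released implementation.

```python
# Derived sizes for the configurations in Table 1, following the stated rules:
# feed-forward/filter size = 4H, attention heads = H / 64.
configs = {"base": 768, "large": 1024, "xlarge": 2048, "xxlarge": 4096}
for name, H in configs.items():
    print(f"{name:8s} H={H:5d}  FFN={4 * H:6d}  heads={H // 64}")
# base     H=  768  FFN=  3072  heads=12
# large    H= 1024  FFN=  4096  heads=16
# xlarge   H= 2048  FFN=  8192  heads=32
# xxlarge  H= 4096  FFN= 16384  heads=64
```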
59
+ There are three main contributions that ALBERT makes over the design choices of BERT.
60
+
61
+ Factorized embedding parameterization. In BERT, as well as subsequent modeling improvements such as XLNet (Yang et al., 2019) and RoBERTa (Liu et al., 2019), the WordPiece embedding size $E$ is tied with the hidden layer size $H$ , i.e., $E \equiv H$ . This decision appears suboptimal for both modeling and practical reasons, as follows.
62
+
63
+ From a modeling perspective, WordPiece embeddings are meant to learn context-independent representations, whereas hidden-layer embeddings are meant to learn context-dependent representations. As experiments with context length indicate (Liu et al., 2019), the power of BERT-like representations comes from the use of context to provide the signal for learning such context-dependent representations. As such, untying the WordPiece embedding size $E$ from the hidden layer size $H$ allows us to make a more efficient usage of the total model parameters as informed by modeling needs, which dictate that $H \gg E$ .
64
+
65
+ From a practical perspective, natural language processing usually require the vocabulary size $V$ to be large. If $E \equiv H$ , then increasing $H$ increases the size of the embedding matrix, which has size
66
+
67
+ $V \times E$ . This can easily result in a model with billions of parameters, most of which are only updated sparsely during training.
68
+
69
+ Therefore, for ALBERT we use a factorization of the embedding parameters, decomposing them into two smaller matrices. Instead of projecting the one-hot vectors directly into the hidden space of size $H$ , we first project them into a lower dimensional embedding space of size $E$ , and then project it to the hidden space. By using this decomposition, we reduce the embedding parameters from $O(V \times H)$ to $O(V \times E + E \times H)$ . This parameter reduction is significant when $H \gg E$ . We choose to use the same $E$ for all word pieces because they are much more evenly distributed across documents compared to whole-word embeddings, where having different embedding sizes (Grave et al. (2017); Baevski & Auli (2018); Dai et al. (2019)) for different words is important.
70
+
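+ A back-of-the-envelope calculation makes the saving concrete. The sketch below counts only the embedding-block parameters, using the paper's vocabulary size of 30,000 word pieces, the xxlarge hidden size $H = 4096$ , and $E = 128$ ; it is an illustration, not the model implementation.

```python
# Parameter count of the embedding block, tied (BERT-style) vs. factorized (ALBERT-style).
V, H, E = 30_000, 4_096, 128

tied       = V * H              # one V x H embedding matrix (E = H)
factorized = V * E + E * H      # V x E embedding followed by an E x H projection

print(f"tied (E = H):        {tied:>12,} parameters")        # 122,880,000
print(f"factorized (E = 128): {factorized:>11,} parameters") # 4,364,288
print(f"reduction:            {tied / factorized:.1f}x")
```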
71
+ Cross-layer parameter sharing. For ALBERT, we propose cross-layer parameter sharing as another way to improve parameter efficiency. There are multiple ways to share parameters, e.g., only sharing feed-forward network (FFN) parameters across layers, or only sharing attention parameters. The default decision for ALBERT is to share all parameters across layers. All our experiments use this default decision unless otherwise specified. We compare this design decision against other strategies in our experiments in Sec. 4.5.
72
+
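+ The sketch below illustrates the all-shared strategy with PyTorch's stock `TransformerEncoderLayer`. It is only meant to show that reusing one layer's weights at every depth keeps the parameter count independent of the number of layers; the layer itself differs from ALBERT's actual block (e.g., activation function and embedding factorization), so treat it as an assumption-laden illustration rather than the released model.

```python
import torch.nn as nn

class SharedEncoder(nn.Module):
    """All-shared cross-layer parameters: one layer's weights are reused at
    every one of the L depths, so depth no longer grows the parameter count."""
    def __init__(self, hidden=768, heads=12, ffn=4 * 768, num_layers=12):
        super().__init__()
        self.layer = nn.TransformerEncoderLayer(
            d_model=hidden, nhead=heads, dim_feedforward=ffn, batch_first=True)
        self.num_layers = num_layers

    def forward(self, x):
        for _ in range(self.num_layers):   # same weights applied at every depth
            x = self.layer(x)
        return x

shared = SharedEncoder()
unshared = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(768, 12, 4 * 768, batch_first=True), num_layers=12)
count = lambda m: sum(p.numel() for p in m.parameters())
print(f"shared:   {count(shared):,}")     # parameters of a single layer
print(f"unshared: {count(unshared):,}")   # roughly 12x larger
```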
73
+ Similar strategies have been explored by Dehghani et al. (2018) (Universal Transformer, UT) and Bai et al. (2019) (Deep Equilibrium Models, DQE) for Transformer networks. Different from our observations, Dehghani et al. (2018) show that UT outperforms a vanilla Transformer. Bai et al. (2019) show that their DQEs reach an equilibrium point for which the input and output embedding of a certain layer stay the same. Our measurement on the L2 distances and cosine similarity show that our embeddings are oscillating rather than converging.
74
+
75
+ ![](images/e9c7e0aa628c7132cc4211bc24456f81ee1d8dddaf1d13879ac07e5295de9fdb.jpg)
76
+ Figure 1: The L2 distances and cosine similarity (in terms of degree) of the input and output embedding of each layer for BERT-large and ALBERT-large.
77
+
78
+ ![](images/f1d65322d22d936be5975593e3f3247b84b1132710d667d129294296a19f6825.jpg)
79
+
80
+ Figure 1 shows the L2 distances and cosine similarity of the input and output embeddings for each layer, using BERT-large and ALBERT-large configurations (see Table 1). We observe that the transitions from layer to layer are much smoother for ALBERT than for BERT. These results show that weight-sharing has an effect on stabilizing network parameters. Although there is a drop for both metrics compared to BERT, they nevertheless do not converge to 0 even after 24 layers. This shows that the solution space for ALBERT parameters is very different from the one found by DQE.
81
+
82
+ Inter-sentence coherence loss. In addition to the masked language modeling (MLM) loss (Devlin et al., 2019), BERT uses an additional loss called next-sentence prediction (NSP). NSP is a binary classification loss for predicting whether two segments appear consecutively in the original text, as follows: positive examples are created by taking consecutive segments from the training corpus; negative examples are created by pairing segments from different documents; positive and negative examples are sampled with equal probability. The NSP objective was designed to improve performance on downstream tasks, such as natural language inference, that require reasoning about the relationship between sentence pairs. However, subsequent studies (Yang et al., 2019; Liu et al., 2019) found NSP's impact unreliable and decided to eliminate it, a decision supported by an improvement in downstream task performance across several tasks.
83
+
84
+ We conjecture that the main reason behind NSP's ineffectiveness is its lack of difficulty as a task, as compared to MLM. As formulated, NSP conflates topic prediction and coherence prediction in a
85
+
86
+ <table><tr><td colspan="2">Model</td><td>Parameters</td><td>Layers</td><td>Hidden</td><td>Embedding</td><td>Parameter-sharing</td></tr><tr><td rowspan="2">BERT</td><td>base</td><td>108M</td><td>12</td><td>768</td><td>768</td><td>False</td></tr><tr><td>large</td><td>334M</td><td>24</td><td>1024</td><td>1024</td><td>False</td></tr><tr><td rowspan="4">ALBERT</td><td>base</td><td>12M</td><td>12</td><td>768</td><td>128</td><td>True</td></tr><tr><td>large</td><td>18M</td><td>24</td><td>1024</td><td>128</td><td>True</td></tr><tr><td>xlarge</td><td>60M</td><td>24</td><td>2048</td><td>128</td><td>True</td></tr><tr><td>xxlarge</td><td>235M</td><td>12</td><td>4096</td><td>128</td><td>True</td></tr></table>
87
+
88
+ Table 1: The configurations of the main BERT and ALBERT models analyzed in this paper.
89
+
90
+ single task $^2$ . However, topic prediction is easier to learn compared to coherence prediction, and also overlaps more with what is learned using the MLM loss.
91
+
92
+ We maintain that inter-sentence modeling is an important aspect of language understanding, but we propose a loss based primarily on coherence. That is, for ALBERT, we use a sentence-order prediction (SOP) loss, which avoids topic prediction and instead focuses on modeling inter-sentence coherence. The SOP loss uses as positive examples the same technique as BERT (two consecutive segments from the same document), and as negative examples the same two consecutive segments but with their order swapped. This forces the model to learn finer-grained distinctions about discourse-level coherence properties. As we show in Sec. 4.6, it turns out that NSP cannot solve the SOP task at all (i.e., it ends up learning the easier topic-prediction signal, and performs at random-baseline level on the SOP task), while SOP can solve the NSP task to a reasonable degree, presumably based on analyzing misaligned coherence cues. As a result, ALBERT models consistently improve downstream task performance for multi-sentence encoding tasks.
93
+
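+ A minimal sketch of how SOP training pairs could be constructed from two consecutive segments of the same document is shown below. The helper name and the 50/50 positive/negative split are our assumptions for illustration; tokenization, segment selection, and batching are omitted.

```python
import random

def make_sop_example(seg_a, seg_b):
    """Build one sentence-order-prediction pair from two consecutive segments
    of the same document (a sketch of the construction described in the text).

    Positive (label 1): the segments in their original order.
    Negative (label 0): the same two segments with their order swapped.
    """
    if random.random() < 0.5:
        return (seg_a, seg_b), 1   # original order
    return (seg_b, seg_a), 0       # swapped order

pair, label = make_sop_example(
    "The committee met on Tuesday.", "It approved the budget the next day.")
print(pair, label)
```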
94
+ # 3.2 MODEL SETUP
95
+
96
+ We present the differences between BERT and ALBERT models with comparable hyperparameter settings in Table 1. Due to the design choices discussed above, ALBERT models have much smaller parameter size compared to corresponding BERT models.
97
+
98
+ For example, ALBERT-large has about 18x fewer parameters compared to BERT-large, 18M versus 334M. An ALBERT-xlarge configuration with $H = 2048$ has only 60M parameters and an ALBERT-xxlarge configuration with $H = 4096$ has 233M parameters, i.e., around 70% of BERT-large's parameters. Note that for ALBERT-xxlarge, we mainly report results on a 12-layer network because a 24-layer network (with the same configuration) obtains similar results but is computationally more expensive.
99
+
100
+ This improvement in parameter efficiency is the most important advantage of ALBERT's design choices. Before we can quantify this advantage, we need to introduce our experimental setup in more detail.
101
+
102
+ # 4 EXPERIMENTAL RESULTS
103
+
104
+ # 4.1 EXPERIMENTAL SETUP
105
+
106
+ To keep the comparison as meaningful as possible, we follow the BERT (Devlin et al., 2019) setup in using the BOOKCORPUS (Zhu et al., 2015) and English Wikipedia (Devlin et al., 2019) for pretraining baseline models. These two corpora consist of around 16GB of uncompressed text. We format our inputs as "[CLS] $x_{1}$ [SEP] $x_{2}$ [SEP]", where $x_{1} = x_{1,1}, x_{1,2} \cdots$ and $x_{2} = x_{2,1}, x_{2,2} \cdots$ are two segments. We always limit the maximum input length to 512, and randomly generate input sequences shorter than 512 with a probability of $10\%$ . Like BERT, we use a vocabulary size of 30,000, tokenized using SentencePiece (Kudo & Richardson, 2018) as in XLNet (Yang et al., 2019).
107
+
108
+ We generate masked inputs for the MLM targets using $n$ -gram masking (Joshi et al., 2019), with the length of each $n$ -gram mask selected randomly. The probability for the length $n$ is given by
109
+
110
+ $$
111
+ p(n) = \frac{1/n}{\sum_{k=1}^{N} 1/k}
112
+ $$
113
+
114
+ We set the maximum length of $n$ -gram (i.e., $n$ ) to be 3 (i.e., the MLM target can consist of up to a 3-gram of complete words, such as "White House correspondents").
115
+
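+ The span-length distribution above is easy to reproduce; the sketch below computes $p(n)$ for the paper's maximum length $N = 3$ and draws a length for one masking decision. Function names are illustrative.

```python
import random

def ngram_length_probs(max_n=3):
    """p(n) proportional to 1/n for n = 1..max_n, normalized as in the formula above."""
    weights = [1.0 / n for n in range(1, max_n + 1)]
    total = sum(weights)
    return [w / total for w in weights]

probs = ngram_length_probs(3)
print(probs)  # [0.545..., 0.272..., 0.181...] for n = 1, 2, 3

# Draw a span length for one masking decision.
n = random.choices(range(1, 4), weights=probs, k=1)[0]
```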
116
+ All the model updates use a batch size of 4096 and a LAMB optimizer with learning rate 0.00176 (You et al., 2019). We train all models for 125,000 steps unless otherwise specified. Training was done on Cloud TPU V3. The number of TPUs used for training ranged from 64 to 512, depending on model size.
117
+
118
+ The experimental setup described in this section is used for all of our own versions of BERT as well as ALBERT models, unless otherwise specified.
119
+
120
+ # 4.2 EVALUATION BENCHMARKS
121
+
122
+ # 4.2.1 INTRINSIC EVALUATION
123
+
124
+ To monitor the training progress, we create a development set based on the development sets from SQuAD and RACE using the same procedure as in Sec. 4.1. We report accuracies for both MLM and sentence classification tasks. Note that we only use this set to check how the model is converging; it has not been used in a way that would affect the performance of any downstream evaluation, such as via model selection.
125
+
126
+ # 4.2.2 DOWNSTREAM EVALUATION
127
+
128
+ Following Yang et al. (2019) and Liu et al. (2019), we evaluate our models on three popular benchmarks: The General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2018), two versions of the Stanford Question Answering Dataset (SQuAD; Rajpurkar et al., 2016; 2018), and the ReAding Comprehension from Examinations (RACE) dataset (Lai et al., 2017). For completeness, we provide description of these benchmarks in Appendix A.3. As in (Liu et al., 2019), we perform early stopping on the development sets, on which we report all comparisons except for our final comparisons based on the task leaderboards, for which we also report test set results. For GLUE datasets that have large variances on the dev set, we report median over 5 runs.
129
+
130
+ # 4.3 OVERALL COMPARISON BETWEEN BERT AND ALBERT
131
+
132
+ We are now ready to quantify the impact of the design choices described in Sec. 3, specifically the ones around parameter efficiency. The improvement in parameter efficiency showcases the most important advantage of ALBERT's design choices, as shown in Table 2: with only around $70\%$ of BERT-large's parameters, ALBERT-xxlarge achieves significant improvements over BERT-large, as measured by the difference on development set scores for several representative downstream tasks: SQuAD v1.1 $(+1.9\%)$ , SQuAD v2.0 $(+3.1\%)$ , MNLI $(+1.4\%)$ , SST-2 $(+2.2\%)$ , and RACE $(+8.4\%)$ .
133
+
134
+ Another interesting observation is the speed of data throughput at training time under the same training configuration (same number of TPUs). Because of less communication and fewer computations, ALBERT models have higher data throughput compared to their corresponding BERT models. If we use BERT-large as the baseline, we observe that ALBERT-large is about 1.7 times faster in iterating through the data while ALBERT-xxlarge is about 3 times slower because of the larger structure.
135
+
136
+ Next, we perform ablation experiments that quantify the individual contribution of each of the design choices for ALBERT.
137
+
138
+ # 4.4 FACTORIZED EMBEDDING PARAMETERIZATION
139
+
140
+ Table 3 shows the effect of changing the vocabulary embedding size $E$ using an ALBERT-base configuration setting (see Table 1), using the same set of representative downstream tasks. Under the non-shared condition (BERT-style), larger embedding sizes give better performance, but not by
141
+
142
+ <table><tr><td colspan="2">Model</td><td>Parameters</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td><td>Speedup</td></tr><tr><td rowspan="2">BERT</td><td>base</td><td>108M</td><td>90.4/83.2</td><td>80.4/77.6</td><td>84.5</td><td>92.8</td><td>68.2</td><td>82.3</td><td>4.7x</td></tr><tr><td>large</td><td>334M</td><td>92.2/85.5</td><td>85.0/82.2</td><td>86.6</td><td>93.0</td><td>73.9</td><td>85.2</td><td>1.0</td></tr><tr><td rowspan="4">ALBERT</td><td>base</td><td>12M</td><td>89.3/82.3</td><td>80.0/77.1</td><td>81.6</td><td>90.3</td><td>64.0</td><td>80.1</td><td>5.6x</td></tr><tr><td>large</td><td>18M</td><td>90.6/83.9</td><td>82.3/79.4</td><td>83.5</td><td>91.7</td><td>68.5</td><td>82.4</td><td>1.7x</td></tr><tr><td>xlarge</td><td>60M</td><td>92.5/86.1</td><td>86.1/83.1</td><td>86.4</td><td>92.4</td><td>74.8</td><td>85.5</td><td>0.6x</td></tr><tr><td>xxlarge</td><td>235M</td><td>94.1/88.3</td><td>88.1/85.1</td><td>88.0</td><td>95.2</td><td>82.3</td><td>88.7</td><td>0.3x</td></tr></table>
143
+
144
+ much. Under the all-shared condition (ALBERT-style), an embedding of size 128 appears to be the best. Based on these results, we use an embedding size $E = 128$ in all future settings, as a necessary step to do further scaling.
145
+
146
+ Table 2: Dev set results for models pretrained over BOOKCORPUS and Wikipedia for 125k steps. Here and everywhere else, the Avg column is computed by averaging the scores of the downstream tasks to its left (the two numbers of F1 and EM for each SQuAD are first averaged).
147
+
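+ For clarity, the Avg column can be reproduced as follows; the sketch below recomputes the BERT-large Avg from Table 2 under the stated rule (F1/EM averaged first, then the five task scores averaged).

```python
def avg_score(squad11, squad20, mnli, sst2, race):
    """Avg column: each SQuAD F1/EM pair is averaged first, then all five tasks."""
    s11 = sum(squad11) / 2
    s20 = sum(squad20) / 2
    return (s11 + s20 + mnli + sst2 + race) / 5

# BERT-large row from Table 2:
print(round(avg_score((92.2, 85.5), (85.0, 82.2), 86.6, 93.0, 73.9), 1))  # 85.2
```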
148
+ <table><tr><td>Model</td><td>E</td><td>Parameters</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>ALBERT</td><td>64</td><td>87M</td><td>89.9/82.9</td><td>80.1/77.8</td><td>82.9</td><td>91.5</td><td>66.7</td><td>81.3</td></tr><tr><td>base</td><td>128</td><td>89M</td><td>89.9/82.8</td><td>80.3/77.3</td><td>83.7</td><td>91.5</td><td>67.9</td><td>81.7</td></tr><tr><td>not-shared</td><td>256</td><td>93M</td><td>90.2/83.2</td><td>80.3/77.4</td><td>84.1</td><td>91.9</td><td>67.3</td><td>81.8</td></tr><tr><td></td><td>768</td><td>108M</td><td>90.4/83.2</td><td>80.4/77.6</td><td>84.5</td><td>92.8</td><td>68.2</td><td>82.3</td></tr><tr><td>ALBERT</td><td>64</td><td>10M</td><td>88.7/81.4</td><td>77.5/74.8</td><td>80.8</td><td>89.4</td><td>63.5</td><td>79.0</td></tr><tr><td>base</td><td>128</td><td>12M</td><td>89.3/82.3</td><td>80.0/77.1</td><td>81.6</td><td>90.3</td><td>64.0</td><td>80.1</td></tr><tr><td>all-shared</td><td>256</td><td>16M</td><td>88.8/81.5</td><td>79.1/76.3</td><td>81.5</td><td>90.3</td><td>63.4</td><td>79.6</td></tr><tr><td></td><td>768</td><td>31M</td><td>88.6/81.5</td><td>79.2/76.6</td><td>82.0</td><td>90.6</td><td>63.3</td><td>79.8</td></tr></table>
149
+
150
+ # 4.5 CROSS-LAYER PARAMETER SHARING
151
+
152
+ Table 4 presents experiments for various cross-layer parameter-sharing strategies, using an ALBERT-base configuration (Table 1) with two embedding sizes ( $E = 768$ and $E = 128$ ). We compare the all-shared strategy (ALBERT-style), the not-shared strategy (BERT-style), and intermediate strategies in which only the attention parameters are shared (but not the FFN ones) or only the FFN parameters are shared (but not the attention ones).
153
+
154
+ The all-shared strategy hurts performance under both conditions, but it is less severe for $E = 128$ (-1.5 on Avg) compared to $E = 768$ (-2.5 on Avg). In addition, most of the performance drop appears to come from sharing the FFN-layer parameters, while sharing the attention parameters results in no drop when $E = 128$ (+0.1 on Avg), and a slight drop when $E = 768$ (-0.7 on Avg).
155
+
156
+ There are other strategies for sharing parameters across layers. For example, we can divide the $L$ layers into $N$ groups of size $M$ , where each size- $M$ group shares parameters. Overall, our experimental results show that the smaller the group size $M$ is, the better the performance we get. However, decreasing the group size $M$ also dramatically increases the number of overall parameters. We choose the all-shared strategy as our default choice.
157
+
158
+ Table 3: The effect of vocabulary embedding size on the performance of ALBERT-base.
159
+
160
+ <table><tr><td></td><td>Model</td><td>Parameters</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td rowspan="4">ALBERT
161
+ base
162
+ E=768</td><td>all-shared</td><td>31M</td><td>88.6/81.5</td><td>79.2/76.6</td><td>82.0</td><td>90.6</td><td>63.3</td><td>79.8</td></tr><tr><td>shared-attention</td><td>83M</td><td>89.9/82.7</td><td>80.0/77.2</td><td>84.0</td><td>91.4</td><td>67.7</td><td>81.6</td></tr><tr><td>shared-FFN</td><td>57M</td><td>89.2/82.1</td><td>78.2/75.4</td><td>81.5</td><td>90.8</td><td>62.6</td><td>79.5</td></tr><tr><td>not-shared</td><td>108M</td><td>90.4/83.2</td><td>80.4/77.6</td><td>84.5</td><td>92.8</td><td>68.2</td><td>82.3</td></tr><tr><td rowspan="4">ALBERT
163
+ base
164
+ E=128</td><td>all-shared</td><td>12M</td><td>89.3/82.3</td><td>80.0/77.1</td><td>82.0</td><td>90.3</td><td>64.0</td><td>80.1</td></tr><tr><td>shared-attention</td><td>64M</td><td>89.9/82.8</td><td>80.7/77.9</td><td>83.4</td><td>91.9</td><td>67.6</td><td>81.7</td></tr><tr><td>shared-FFN</td><td>38M</td><td>88.9/81.6</td><td>78.6/75.6</td><td>82.3</td><td>91.7</td><td>64.4</td><td>80.2</td></tr><tr><td>not-shared</td><td>89M</td><td>89.9/82.8</td><td>80.3/77.3</td><td>83.2</td><td>91.5</td><td>67.9</td><td>81.6</td></tr></table>
165
+
166
+ Table 4: The effect of cross-layer parameter-sharing strategies, ALBERT-base configuration.
167
+
168
+ # 4.6 SENTENCE ORDER PREDICTION (SOP)
169
+
170
+ We compare head-to-head three experimental conditions for the additional inter-sentence loss: none (XLNet- and RoBERTa-style), NSP (BERT-style), and SOP (ALBERT-style), using an ALBERT-base configuration. Results are shown in Table 5, both over intrinsic (accuracy for the MLM, NSP, and SOP tasks) and downstream tasks.
171
+
172
+ <table><tr><td rowspan="2">SP tasks</td><td colspan="3">Intrinsic Tasks</td><td colspan="6">Downstream Tasks</td></tr><tr><td>MLM</td><td>NSP</td><td>SOP</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>None</td><td>54.9</td><td>52.4</td><td>53.3</td><td>88.6/81.5</td><td>78.1/75.3</td><td>81.5</td><td>89.9</td><td>61.7</td><td>79.0</td></tr><tr><td>NSP</td><td>54.5</td><td>90.5</td><td>52.0</td><td>88.4/81.5</td><td>77.2/74.6</td><td>81.6</td><td>91.1</td><td>62.3</td><td>79.2</td></tr><tr><td>SOP</td><td>54.0</td><td>78.9</td><td>86.5</td><td>89.3/82.3</td><td>80.0/77.1</td><td>82.0</td><td>90.3</td><td>64.0</td><td>80.1</td></tr></table>
173
+
174
+ The results on the intrinsic tasks reveal that the NSP loss brings no discriminative power to the SOP task (52.0% accuracy, similar to the random-guess performance for the "None" condition). This allows us to conclude that NSP ends up modeling only topic shift. In contrast, the SOP loss does solve the NSP task relatively well (78.9% accuracy), and the SOP task even better (86.5% accuracy). Even more importantly, the SOP loss appears to consistently improve downstream task performance for multi-sentence encoding tasks (around +1% for SQuAD1.1, +2% for SQuAD2.0, +1.7% for RACE), for an Avg score improvement of around +1%.
175
+
176
+ # 4.7 WHAT IF WE TRAIN FOR THE SAME AMOUNT OF TIME?
177
+
178
+ The speed-up results in Table 2 indicate that data-throughput for BERT-large is about $3.17\mathrm{x}$ higher compared to ALBERT-xxlarge. Since longer training usually leads to better performance, we perform a comparison in which, instead of controlling for data throughput (number of training steps), we control for the actual training time (i.e., let the models train for the same number of hours). In Table 6, we compare the performance of a BERT-large model after $400\mathrm{k}$ training steps (after 34h of training), roughly equivalent to the amount of time needed to train an ALBERT-xxlarge model for 125k training steps (32h of training).
179
+
180
+ Table 5: The effect of sentence-prediction loss, NSP vs. SOP, on intrinsic and downstream tasks.
181
+
182
+ <table><tr><td>Models</td><td>Steps</td><td>Time</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>BERT-large</td><td>400k</td><td>34h</td><td>93.5/87.4</td><td>86.9/84.3</td><td>87.8</td><td>94.6</td><td>77.3</td><td>87.2</td></tr><tr><td>ALBERT-xxlarge</td><td>125k</td><td>32h</td><td>94.0/88.1</td><td>88.3/85.3</td><td>87.8</td><td>95.4</td><td>82.5</td><td>88.7</td></tr></table>
183
+
184
+ After training for roughly the same amount of time, ALBERT-xxlarge is significantly better than BERT-large: $+1.5\%$ better on Avg, with the difference on RACE as high as $+5.2\%$ .
185
+
186
+ # 4.8 ADDITIONAL TRAINING DATA AND DROPOUT EFFECTS
187
+
188
+ The experiments done up to this point use only the Wikipedia and BOOKCORPUS datasets, as in (Devlin et al., 2019). In this section, we report measurements on the impact of the additional data used by both XLNet (Yang et al., 2019) and RoBERTa (Liu et al., 2019).
189
+
190
+ Fig. 2a plots the dev set MLM accuracy under two conditions, without and with additional data, with the latter condition giving a significant boost. We also observe performance improvements on the downstream tasks in Table 7, except for the SQuAD benchmarks (which are Wikipedia-based, and therefore are negatively affected by out-of-domain training material).
191
+
192
+ Table 6: The effect of controlling for training time, BERT-large vs ALBERT-xxlarge configurations.
193
+
194
+ <table><tr><td></td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>No additional data</td><td>89.3/82.3</td><td>80.0/77.1</td><td>81.6</td><td>90.3</td><td>64.0</td><td>80.1</td></tr><tr><td>With additional data</td><td>88.8/81.7</td><td>79.1/76.3</td><td>82.4</td><td>92.8</td><td>66.0</td><td>80.8</td></tr></table>
195
+
196
+ Table 7: The effect of additional training data using the ALBERT-base configuration.
197
+
198
+ We also note that, even after training for 1M steps, our largest models still do not overfit to their training data. As a result, we decide to remove dropout to further increase our model capacity. The
199
+
200
+ ![](images/73ebecfa97fa9e9248ea37c9627731c814f4ad1f93c3d754f3da7954f4820d46.jpg)
201
+ (a) Adding data
202
+
203
+ ![](images/b0714aa9fd3c6ba9746c9e2c9a76b911ef2950fbc270a810dd82b455ac7f4535.jpg)
204
+ (b) Removing dropout
205
+ Figure 2: The effects of adding data and removing dropout during training.
206
+
207
+ plot in Fig. 2b shows that removing dropout significantly improves MLM accuracy. Intermediate evaluation on ALBERT-xxlarge at around 1M training steps (Table 8) also confirms that removing dropout helps the downstream tasks. There is empirical (Szegedy et al., 2017) and theoretical (Li et al., 2019) evidence showing that a combination of batch normalization and dropout in Convolutional Neural Networks may have harmful results. To the best of our knowledge, we are the first to show that dropout can hurt performance in large Transformer-based models. However, the underlying network structure of ALBERT is a special case of the transformer and further experimentation is needed to see if this phenomenon appears with other transformer-based architectures or not.
208
+
209
+ <table><tr><td></td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>With dropout</td><td>94.7/89.2</td><td>89.6/86.9</td><td>90.0</td><td>96.3</td><td>85.7</td><td>90.4</td></tr><tr><td>Without dropout</td><td>94.8/89.5</td><td>89.9/87.2</td><td>90.4</td><td>96.5</td><td>86.1</td><td>90.7</td></tr></table>
210
+
211
+ # 4.9 CURRENT STATE-OF-THE-ART ON NLU TASKS
212
+
213
+ The results we report in this section make use of the training data used by Devlin et al. (2019), as well as the additional data used by Liu et al. (2019) and Yang et al. (2019). We report state-of-the-art results under two settings for fine-tuning: single-model and ensembles. In both settings, we only do single-task fine-tuning<sup>4</sup>. Following Liu et al. (2019), on the development set we report the median result over five runs.
214
+
215
+ Table 8: The effect of removing dropout, measured for an ALBERT-xxlarge configuration.
216
+
217
+ <table><tr><td>Models</td><td>MNLI</td><td>QNLI</td><td>QQP</td><td>RTE</td><td>SST</td><td>MRPC</td><td>CoLA</td><td>STS</td><td>WNLI</td><td>Avg</td></tr><tr><td colspan="11">Single-task single models on dev</td></tr><tr><td>BERT-large</td><td>86.6</td><td>92.3</td><td>91.3</td><td>70.4</td><td>93.2</td><td>88.0</td><td>60.6</td><td>90.0</td><td>-</td><td>-</td></tr><tr><td>XLNet-large</td><td>89.8</td><td>93.9</td><td>91.8</td><td>83.8</td><td>95.6</td><td>89.2</td><td>63.6</td><td>91.8</td><td>-</td><td>-</td></tr><tr><td>RoBERTa-large</td><td>90.2</td><td>94.7</td><td>92.2</td><td>86.6</td><td>96.4</td><td>90.9</td><td>68.0</td><td>92.4</td><td>-</td><td>-</td></tr><tr><td>ALBERT (1M)</td><td>90.4</td><td>95.2</td><td>92.0</td><td>88.1</td><td>96.8</td><td>90.2</td><td>68.7</td><td>92.7</td><td>-</td><td>-</td></tr><tr><td>ALBERT (1.5M)</td><td>90.8</td><td>95.3</td><td>92.2</td><td>89.2</td><td>96.9</td><td>90.9</td><td>71.4</td><td>93.0</td><td>-</td><td>-</td></tr><tr><td colspan="11">Ensembles on test (from leaderboard as of Sept. 16, 2019)</td></tr><tr><td>ALICE</td><td>88.2</td><td>95.7</td><td>90.7</td><td>83.5</td><td>95.2</td><td>92.6</td><td>69.2</td><td>91.1</td><td>80.8</td><td>87.0</td></tr><tr><td>MT-DNN</td><td>87.9</td><td>96.0</td><td>89.9</td><td>86.3</td><td>96.5</td><td>92.7</td><td>68.4</td><td>91.1</td><td>89.0</td><td>87.6</td></tr><tr><td>XLNet</td><td>90.2</td><td>98.6</td><td>90.3</td><td>86.3</td><td>96.8</td><td>93.0</td><td>67.8</td><td>91.6</td><td>90.4</td><td>88.4</td></tr><tr><td>RoBERTa</td><td>90.8</td><td>98.9</td><td>90.2</td><td>88.2</td><td>96.7</td><td>92.3</td><td>67.8</td><td>92.2</td><td>89.0</td><td>88.5</td></tr><tr><td>Adv-RoBERTa</td><td>91.1</td><td>98.8</td><td>90.3</td><td>88.7</td><td>96.8</td><td>93.1</td><td>68.0</td><td>92.4</td><td>89.0</td><td>88.8</td></tr><tr><td>ALBERT</td><td>91.3</td><td>99.2</td><td>90.5</td><td>89.2</td><td>97.1</td><td>93.4</td><td>69.1</td><td>92.5</td><td>91.8</td><td>89.4</td></tr></table>
218
+
219
+ Table 9: State-of-the-art results on the GLUE benchmark. For single-task single-model results, we report ALBERT at 1M steps (comparable to RoBERTa) and at 1.5M steps. The ALBERT ensemble uses models trained with 1M, 1.5M, and other numbers of steps.
220
+
221
+ The single-model ALBERT configuration incorporates the best-performing settings discussed: an ALBERT-xxlarge configuration (Table 1) using combined MLM and SOP losses, and no dropout.
222
+
223
+ The checkpoints that contribute to the final ensemble model are selected based on development set performance; the number of checkpoints considered for this selection range from 6 to 17, depending on the task. For the GLUE (Table 9) and RACE (Table 10) benchmarks, we average the model predictions for the ensemble models, where the candidates are fine-tuned from different training steps using the 12-layer and 24-layer architectures. For SQuAD (Table 10), we average the prediction scores for those spans that have multiple probabilities; we also average the scores of the "unanswerable" decision.
224
+
225
+ Both single-model and ensemble results indicate that ALBERT improves the state-of-the-art significantly for all three benchmarks, achieving a GLUE score of 89.4, a SQuAD 2.0 test F1 score of 92.2, and a RACE test accuracy of 89.4. The latter appears to be a particularly strong improvement, a jump of $+17.4\%$ absolute points over BERT (Devlin et al., 2019; Clark et al., 2019), $+7.6\%$ over XLNet (Yang et al., 2019), $+6.2\%$ over RoBERTa (Liu et al., 2019), and $5.3\%$ over DCMN+ (Zhang et al., 2019), an ensemble of multiple models specifically designed for reading comprehension tasks. Our single model achieves an accuracy of $86.5\%$ , which is still $2.4\%$ better than the state-of-the-art ensemble model.
226
+
227
+ <table><tr><td>Models</td><td>SQuAD1.1 dev</td><td>SQuAD2.0 dev</td><td>SQuAD2.0 test</td><td>RACE test (Middle/High)</td></tr><tr><td colspan="5">Single model (from leaderboard as of Sept. 23, 2019)</td></tr><tr><td>BERT-large</td><td>90.9/84.1</td><td>81.8/79.0</td><td>89.1/86.3</td><td>72.0 (76.6/70.1)</td></tr><tr><td>XLNet</td><td>94.5/89.0</td><td>88.8/86.1</td><td>89.1/86.3</td><td>81.8 (85.5/80.2)</td></tr><tr><td>RoBERTa</td><td>94.6/88.9</td><td>89.4/86.5</td><td>89.8/86.8</td><td>83.2 (86.5/81.3)</td></tr><tr><td>UPM</td><td>-</td><td>-</td><td>89.9/87.2</td><td>-</td></tr><tr><td>XLNet + SG-Net Verifier++</td><td>-</td><td>-</td><td>90.1/87.2</td><td>-</td></tr><tr><td>ALBERT (1M)</td><td>94.8/89.2</td><td>89.9/87.2</td><td>-</td><td>86.0 (88.2/85.1)</td></tr><tr><td>ALBERT (1.5M)</td><td>94.8/89.3</td><td>90.2/87.4</td><td>90.9/88.1</td><td>86.5 (89.0/85.5)</td></tr><tr><td colspan="5">Ensembles (from leaderboard as of Sept. 23, 2019)</td></tr><tr><td>BERT-large</td><td>92.2/86.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>XLNet + SG-Net Verifier</td><td>-</td><td>-</td><td>90.7/88.2</td><td>-</td></tr><tr><td>UPM</td><td>-</td><td>-</td><td>90.7/88.2</td><td></td></tr><tr><td>XLNet + DAAF + Verifier</td><td>-</td><td>-</td><td>90.9/88.6</td><td>-</td></tr><tr><td>DCMN+</td><td>-</td><td>-</td><td>-</td><td>84.1 (88.5/82.3)</td></tr><tr><td>ALBERT</td><td>95.5/90.1</td><td>91.4/88.9</td><td>92.2/89.7</td><td>89.4 (91.2/88.6)</td></tr></table>
228
+
229
+ Table 10: State-of-the-art results on the SQuAD and RACE benchmarks.
230
+
231
+ # 5 DISCUSSION
232
+
233
+ While ALBERT-xxlarge has fewer parameters than BERT-large and gets significantly better results, it is computationally more expensive due to its larger structure. An important next step is thus to speed up the training and inference speed of ALBERT through methods like sparse attention (Child et al., 2019) and block attention (Shen et al., 2018). An orthogonal line of research, which could provide additional representation power, includes hard example mining (Mikolov et al., 2013) and more efficient language modeling training (Yang et al., 2019). Additionally, although we have convincing evidence that sentence order prediction is a more consistently-useful learning task that leads to better language representations, we hypothesize that there could be more dimensions not yet captured by the current self-supervised training losses that could create additional representation power for the resulting representations.
234
+
235
+ # ACKNOWLEDGEMENT
236
+
237
+ The authors would like to thank Beer Changpinyo, Nan Ding, Noam Shazeer, and Tomer Levinboim for discussion and providing useful feedback on the project; Omer Levy and Naman Goyal for clarifying experimental setup for RoBERTa; Zihang Dai for clarifying XLNet; Brandon Norick, Emma Strubell, Shaojie Bai, Chas Leichner, and Sachin Mehta for providing useful feedback on the paper; Jacob Devlin for providing the English and multilingual version of training data; Liang Xu, Chenjie Cao and the CLUE community for providing the training data and evaluation benchmark of the Chinese version of ALBERT models.
238
+
239
+ # REFERENCES
240
+
241
+ Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. arXiv preprint arXiv:1809.10853, 2018.
242
+ Shaojie Bai, J. Zico Kolter, and Vladlen Koltun. Deep equilibrium models. In Neural Information Processing Systems (NeurIPS), 2019.
243
+ Roy Bar-Haim, Ido Dagan, Bill Dolan, Lisa Ferro, Danilo Giampiccolo, Bernardo Magnini, and Idan Szpektor. The second PASCAL recognising textual entailment challenge. In Proceedings of the second PASCAL challenges workshop on recognising textual entailment, volume 6, pp. 6-4. Venice, 2006.
244
+ Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth PASCAL recognizing textual entailment challenge. In TAC, 2009.
245
+ Daniel Cer, Mona Diab, Eneko Agirre, Iñigo Lopez-Gazpio, and Lucia Specia. SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pp. 1-14, Vancouver, Canada, August 2017. Association for Computational Linguistics. doi: 10.18653/v1/S17-2001. URL https://www.aclweb.org/anthology/S17-2001.
246
+ Tianqi Chen, Bing Xu, Chiyuan Zhang, and Carlos Guestrin. Training deep nets with sublinear memory cost. arXiv preprint arXiv:1604.06174, 2016.
247
+ Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019.
248
+ Kevin Clark, Minh-Thang Luong, Urvashi Khandelwal, Christopher D Manning, and Quoc V Le. Bam! born-again multi-task networks for natural language understanding. arXiv preprint arXiv:1907.04829, 2019.
249
+ Ido Dagan, Oren Glickman, and Bernardo Magnini. The PASCAL recognising textual entailment challenge. In Machine Learning Challenges Workshop, pp. 177-190. Springer, 2005.
250
+ Andrew M Dai and Quoc V Le. Semi-supervised sequence learning. In Advances in neural information processing systems, pp. 3079-3087, 2015.
251
+ Zihang Dai, Zhilin Yang, Yiming Yang, William W Cohen, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019.
252
+ Mostafa Dehghani, Stephan Gouws, Oriol Vinyals, Jakob Uszkoreit, and Lukasz Kaiser. Universal transformers. arXiv preprint arXiv:1807.03819, 2018.
253
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://www.aclweb.org/anthology/N19-1423.
254
+ William B. Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005. URL: https://www.aclweb.org/anthology/I05-5002.
255
+ Zhe Gan, Yunchen Pu, Ricardo Henao, Chunyuan Li, Xiaodong He, and Lawrence Carin. Learning generic sentence representations using convolutional neural networks. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 2390-2400, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/D17-1254. URL https://www.aclweb.org/anthology/D17-1254.
256
+
257
+ Danilo Giampiccolo, Bernardo Magnini, Ido Dagan, and Bill Dolan. The third PASCAL recognizing textual entailment challenge. In Proceedings of the ACL-PASCAL Workshop on Textual Entailment and Paraphrasing, pp. 1-9, Prague, June 2007. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/W07-1401.
258
+ Aidan N Gomez, Mengye Ren, Raquel Urtasun, and Roger B Grosse. The reversible residual network: Backpropagation without storing activations. In Advances in neural information processing systems, pp. 2214-2224, 2017.
259
+ Linyuan Gong, Di He, Zhuohan Li, Tao Qin, Liwei Wang, and Tieyan Liu. Efficient training of bert by progressively stacking. In International Conference on Machine Learning, pp. 2337-2346, 2019.
260
+ Edouard Grave, Armand Joulin, Moustapha Cisse, Hervé Jégou, et al. Efficient softmax approximation for gpus. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 1302-1310. JMLR.org, 2017.
261
+ Barbara J. Grosz, Aravind K. Joshi, and Scott Weinstein. Centering: A framework for modeling the local coherence of discourse. Computational Linguistics, 21(2):203-225, 1995. URL https://www.aclweb.org/anthology/J95-2003.
262
+ M.A.K. Halliday and Ruqaiya Hasan. Cohesion in English. Routledge, 1976.
263
+ Jie Hao, Xing Wang, Baosong Yang, Longyue Wang, Jinfeng Zhang, and Zhaopeng Tu. Modeling recurrence for transformer. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2019. doi: 10.18653/v1/n19-1122. URL http://dx.doi.org/10.18653/v1/n19-1122.
264
+ Dan Hendrycks and Kevin Gimpel. Gaussian Error Linear Units (GELUs). arXiv preprint arXiv:1606.08415, 2016.
265
+ Felix Hill, Kyunghyun Cho, and Anna Korhonen. Learning distributed representations of sentences from unlabelled data. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 1367-1377. Association for Computational Linguistics, 2016. doi: 10.18653/v1/N16-1162. URL http://aclweb.org/anthology/N16-1162.
266
+ Jerry R. Hobbs. Coherence and coreference. Cognitive Science, 3(1):67-90, 1979.
267
+ Jeremy Howard and Sebastian Ruder. Universal language model fine-tuning for text classification. arXiv preprint arXiv:1801.06146, 2018.
268
+ Shankar Iyer, Nikhil Dandekar, and Kornél Csernai. First Quora dataset release: Question pairs, January 2017. URL https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs.
269
+ Yacine Jernite, Samuel R Bowman, and David Sontag. Discourse-based objectives for fast unsupervised sentence representation learning. arXiv preprint arXiv:1705.00557, 2017.
270
+ Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S Weld, Luke Zettlemoyer, and Omer Levy. SpanBERT: Improving pre-training by representing and predicting spans. arXiv preprint arXiv:1907.10529, 2019.
271
+ Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Antonio Torralba, Raquel Urtasun, and Sanja Fidler. Skip-thought vectors. In Proceedings of the 28th International Conference on Neural Information Processing Systems - Volume 2, NIPS'15, pp. 3294-3302, Cambridge, MA, USA, 2015. MIT Press. URL http://dl.acm.org/citation.cfm?id=2969442.2969607.
272
+ Taku Kudo and John Richardson. SentencePiece: A simple and language independent subword tokenizer and tokenizer for neural text processing. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 66-71, Brussels, Belgium, November 2018. Association for Computational Linguistics. doi: 10.18653/v1/D18-2012. URL https://www.aclweb.org/anthology/D18-2012.
273
+
274
+ Guokun Lai, Qizhe Xie, Hanxiao Liu, Yiming Yang, and Eduard Hovy. RACE: Large-scale ReAding comprehension dataset from examinations. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pp. 785-794, Copenhagen, Denmark, September 2017. Association for Computational Linguistics. doi: 10.18653/v1/D17-1082. URL https://www.aclweb.org/anthology/D17-1082.
275
+ Quoc Le and Tomas Mikolov. Distributed representations of sentences and documents. In Proceedings of the 31st ICML, Beijing, China, 2014.
276
+ Hector Levesque, Ernest Davis, and Leora Morgenstern. The Winograd schema challenge. In Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning, 2012.
277
+ Xiang Li, Shuo Chen, Xiaolin Hu, and Jian Yang. Understanding the disharmony between dropout and batch normalization by variance shift. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2682-2690, 2019.
278
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692, 2019.
279
+ Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. Learned in translation: Contextualized word vectors. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Garnett (eds.), Advances in Neural Information Processing Systems 30, pp. 6294-6305. Curran Associates, Inc., 2017. URL http://papers.nips.cc/paper/7209-learned-in-translation-contextualized-word-vectors.pdf.
280
+ Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In Advances in neural information processing systems, pp. 3111-3119, 2013.
281
+ Allen Nie, Erin Bennett, and Noah Goodman. DisSent: Learning sentence representations from explicit discourse relations. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 4497-4510, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1442. URL https://www.aclweb.org/anthology/P19-1442.
282
+ Jeffrey Pennington, Richard Socher, and Christopher Manning. Glove: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://www.aclweb.org/anthology/D14-1162.
283
+ Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 2227-2237, New Orleans, Louisiana, June 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1202. URL https://www.aclweb.org/anthology/N18-1202.
284
+ Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf, 2018.
285
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. OpenAI Blog, 1(8), 2019.
286
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683, 2019.
287
+
288
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pp. 2383-2392, Austin, Texas, November 2016. Association for Computational Linguistics. doi: 10.18653/v1/D16-1264. URL https://www.aclweb.org/anthology/D16-1264.
289
+ Pranav Rajpurkar, Robin Jia, and Percy Liang. Know what you don't know: Unanswerable questions for SQuAD. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pp. 784-789, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/P18-2124. URL https://www.aclweb.org/anthology/P18-2124.
290
+ Noam Shazeer, Youlong Cheng, Niki Parmar, Dustin Tran, Ashish Vaswani, Penporn Koanantakool, Peter Hawkins, HyoukJoong Lee, Mingsheng Hong, Cliff Young, et al. Mesh-tensorflow: Deep learning for supercomputers. In Advances in Neural Information Processing Systems, pp. 10414-10423, 2018.
291
+ Tao Shen, Tianyi Zhou, Guodong Long, Jing Jiang, and Chengqi Zhang. Bi-directional block selfattention for fast and memory-efficient sequence modeling. arXiv preprint arXiv:1804.00857, 2018.
292
+ Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-LM: Training multi-billion parameter language models using model parallelism, 2019.
293
+ Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pp. 1631-1642, Seattle, Washington, USA, October 2013. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/D13-1170.
294
+ Siqi Sun, Yu Cheng, Zhe Gan, and Jingjing Liu. Patient knowledge distillation for BERT model compression. arXiv preprint arXiv:1908.09355, 2019.
295
+ Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, and Alexander A Alemi. Inception-v4, inception-resnet and the impact of residual connections on learning. In Thirty-First AAAI Conference on Artificial Intelligence, 2017.
296
+ Iulia Turc, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Well-read students learn better: The impact of student initialization on knowledge distillation. arXiv preprint arXiv:1908.08962, 2019.
297
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
298
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pp. 353-355, Brussels, Belgium, November 2018. Association for Computational Linguistics. doi: 10.18653/v1/W18-5446. URL https://www.aclweb.org/anthology/W18-5446.
299
+ Wei Wang, Bin Bi, Ming Yan, Chen Wu, Zuyi Bao, Liwei Peng, and Luo Si. StructBERT: Incorporating language structures into pre-training for deep language understanding. arXiv preprint arXiv:1908.04577, 2019.
300
+ Alex Warstadt, Amanpreet Singh, and Samuel R Bowman. Neural network acceptability judgments. arXiv preprint arXiv:1805.12471, 2018.
301
+ Adina Williams, Nikita Nangia, and Samuel Bowman. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pp. 1112-1122, New Orleans, Louisiana, June 2018. Association for Computational Linguistics. doi: 10.18653/v1/N18-1101. URL https://www.aclweb.org/anthology/N18-1101.
302
+
303
+
304
+
305
+ Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V Le. XLNet: Generalized autoregressive pretraining for language understanding. arXiv preprint arXiv:1906.08237, 2019.
306
+
307
+ Yang You, Jing Li, Jonathan Hseu, Xiaodan Song, James Demmel, and Cho-Jui Hsieh. Reducing BERT pre-training time from 3 days to 76 minutes. arXiv preprint arXiv:1904.00962, 2019.
308
+
309
+ Shuailiang Zhang, Hai Zhao, Yuwei Wu, Zhuosheng Zhang, Xi Zhou, and Xiang Zhou. DCMN+: Dual co-matching network for multi-choice reading comprehension. arXiv preprint arXiv:1908.11511, 2019.
310
+
311
+ Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015.
312
+
313
+ # A APPENDIX
314
+
315
+ # A.1 EFFECT OF NETWORK DEPTH AND WIDTH
316
+
317
+ In this section, we check how depth (number of layers) and width (hidden size) affect the performance of ALBERT. Table 11 shows the performance of an ALBERT-large configuration (see Table 1) using different numbers of layers. Networks with 3 or more layers are trained by fine-tuning from the parameters of the previous depth (e.g., the 12-layer network parameters are fine-tuned from the checkpoint of the 6-layer network parameters); a similar technique was used by Gong et al. (2019), and a minimal warm-start sketch is shown after Table 11. If we compare a 3-layer ALBERT model with a 1-layer ALBERT model, although they have the same number of parameters, the performance increases significantly. However, there are diminishing returns when continuing to increase the number of layers: the results of a 12-layer network are relatively close to the results of a 24-layer network, and the performance of a 48-layer network appears to decline.
318
+
319
+ <table><tr><td>Number of layers</td><td>Parameters</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>1</td><td>18M</td><td>31.1/22.9</td><td>50.1/50.1</td><td>66.4</td><td>80.8</td><td>40.1</td><td>52.9</td></tr><tr><td>3</td><td>18M</td><td>79.8/69.7</td><td>64.4/61.7</td><td>77.7</td><td>86.7</td><td>54.0</td><td>71.2</td></tr><tr><td>6</td><td>18M</td><td>86.4/78.4</td><td>73.8/71.1</td><td>81.2</td><td>88.9</td><td>60.9</td><td>77.2</td></tr><tr><td>12</td><td>18M</td><td>89.8/83.3</td><td>80.7/77.9</td><td>83.3</td><td>91.7</td><td>66.7</td><td>81.5</td></tr><tr><td>24</td><td>18M</td><td>90.3/83.3</td><td>81.8/79.0</td><td>83.3</td><td>91.5</td><td>68.7</td><td>82.1</td></tr><tr><td>48</td><td>18M</td><td>90.0/83.1</td><td>81.8/78.9</td><td>83.4</td><td>91.9</td><td>66.9</td><td>81.8</td></tr></table>
320
+
321
+ Table 11: The effect of increasing the number of layers for an ALBERT-large configuration.
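+ Because all cross-layer parameters are shared in ALBERT, changing the number of layers does not change the parameter set (every depth in Table 11 has 18M parameters), so the warm-start above amounts to loading the shallower checkpoint into a deeper configuration. The following is a minimal sketch of this idea using the Hugging Face `transformers` library purely as an illustration; it is not the implementation used for the experiments, and the configuration values only approximate the ALBERT-large setting.
+
+ ```python
+ from transformers import AlbertConfig, AlbertModel
+
+ def make_albert(num_layers):
+     # ALBERT-large-like configuration; every transformer layer shares one set of weights
+     cfg = AlbertConfig(hidden_size=1024, num_attention_heads=16, intermediate_size=4096,
+                        embedding_size=128, num_hidden_layers=num_layers)
+     return AlbertModel(cfg)
+
+ shallow = make_albert(6)                    # pretrain this model first (training loop omitted)
+ deep = make_albert(12)                      # deeper model: same parameters, applied more times
+ deep.load_state_dict(shallow.state_dict())  # warm-start the 12-layer run from the 6-layer checkpoint
+ ```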
322
+
323
+ A similar phenomenon, this time for width, can be seen in Table 12 for a 3-layer ALBERT-large configuration. As we increase the hidden size, we get an increase in performance with diminishing returns. At a hidden size of 6144, the performance appears to decline significantly. We note that none of these models appear to overfit the training data, and they all have higher training and development loss compared to the best-performing ALBERT configurations.
324
+
325
+ <table><tr><td>Hidden size</td><td>Parameters</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>1024</td><td>18M</td><td>79.8/69.7</td><td>64.4/61.7</td><td>77.7</td><td>86.7</td><td>54.0</td><td>71.2</td></tr><tr><td>2048</td><td>60M</td><td>83.3/74.1</td><td>69.1/66.6</td><td>79.7</td><td>88.6</td><td>58.2</td><td>74.6</td></tr><tr><td>4096</td><td>225M</td><td>85.0/76.4</td><td>71.0/68.1</td><td>80.3</td><td>90.4</td><td>60.4</td><td>76.3</td></tr><tr><td>6144</td><td>499M</td><td>84.7/75.8</td><td>67.8/65.4</td><td>78.1</td><td>89.1</td><td>56.0</td><td>74.0</td></tr></table>
326
+ Table 12: The effect of increasing the hidden-layer size for an ALBERT-large 3-layer configuration.
327
+ # A.2 DO VERY WIDE ALBERT MODELS NEED TO BE DEEP(ER) TOO?
328
+
329
+ In Section A.1, we show that for ALBERT-large ( $H = 1024$ ), the difference between a 12-layer and a 24-layer configuration is small. Does this result still hold for much wider ALBERT configurations, such as ALBERT-xxlarge ( $H = 4096$ )?
330
+
331
+
332
+
333
+ <table><tr><td>Number of layers</td><td>SQuAD1.1</td><td>SQuAD2.0</td><td>MNLI</td><td>SST-2</td><td>RACE</td><td>Avg</td></tr><tr><td>12</td><td>94.0/88.1</td><td>88.3/85.3</td><td>87.8</td><td>95.4</td><td>82.5</td><td>88.7</td></tr><tr><td>24</td><td>94.1/88.3</td><td>88.1/85.1</td><td>88.0</td><td>95.2</td><td>82.3</td><td>88.7</td></tr></table>
334
+
335
+ Table 13: The effect of a deeper network using an ALBERT-xxlarge configuration.
336
+
337
+ The answer is given by the results from Table 13. The difference between 12-layer and 24-layer ALBERT-xxlarge configurations in terms of downstream accuracy is negligible, with the Avg score being the same. We conclude that, when sharing all cross-layer parameters (ALBERT-style), there is no need for models deeper than a 12-layer configuration.
338
+
339
+ # A.3 DOWNSTREAM EVALUATION TASKS
340
+
341
+ GLUE GLUE is comprised of 9 tasks, namely Corpus of Linguistic Acceptability (CoLA; Warstadt et al., 2018), Stanford Sentiment Treebank (SST; Socher et al., 2013), Microsoft Research Paraphrase Corpus (MRPC; Dolan & Brockett, 2005), Semantic Textual Similarity Benchmark (STS; Cer et al., 2017), Quora Question Pairs (QQP; Iyer et al., 2017), Multi-Genre NLI (MNLI; Williams et al., 2018), Question NLI (QNLI; Rajpurkar et al., 2016), Recognizing Textual Entailment (RTE; Dagan et al., 2005; Bar-Haim et al., 2006; Giampiccolo et al., 2007; Bentivogli et al., 2009) and Winograd NLI (WNLI; Levesque et al., 2012). It focuses on evaluating model capabilities for natural language understanding. When reporting MNLI results, we only report the "match" condition (MNLI-m). We follow the finetuning procedures from prior work (Devlin et al., 2019; Liu et al., 2019; Yang et al., 2019) and report the held-out test set performance obtained from GLUE submissions. For test set submissions, we perform task-specific modifications for WNLI and QNLI as described by Liu et al. (2019) and Yang et al. (2019).
342
+
343
+ SQuAD SQuAD is an extractive question answering dataset built from Wikipedia. The answers are segments from the context paragraphs and the task is to predict answer spans. We evaluate our models on two versions of SQuAD: v1.1 and v2.0. SQuAD v1.1 has 100,000 human-annotated question/answer pairs. SQuAD v2.0 additionally introduced 50,000 unanswerable questions. For SQuAD v1.1, we use the same training procedure as BERT, whereas for SQuAD v2.0, models are jointly trained with a span extraction loss and an additional classifier for predicting answerability (Yang et al., 2019; Liu et al., 2019). We report both development set and test set performance.
344
+
345
+ RACE RACE is a large-scale dataset for multi-choice reading comprehension, collected from English examinations in China with nearly 100,000 questions. Each instance in RACE has 4 candidate answers. Following prior work (Yang et al., 2019; Liu et al., 2019), we use the concatenation of the passage, question, and each candidate answer as the input to models. Then, we use the representations from the "[CLS]" token for predicting the probability of each answer. The dataset consists of two domains: middle school and high school. We train our models on both domains and report accuracies on both the development set and test set.
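+ A minimal sketch of the scoring head described above (an illustration, not our exact fine-tuning code): each concatenated (passage, question, candidate) sequence is encoded separately, its "[CLS]" vector is projected to a scalar score, and the candidate scores are trained with cross-entropy against the index of the correct answer.
+
+ ```python
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class MultipleChoiceHead(nn.Module):
+     """Scores each candidate answer from the [CLS] vector of its input sequence."""
+     def __init__(self, hidden_size, dropout=0.1):
+         super().__init__()
+         self.dropout = nn.Dropout(dropout)
+         self.scorer = nn.Linear(hidden_size, 1)
+
+     def forward(self, cls_states, labels=None):
+         # cls_states: [batch, num_choices, hidden] -- one [CLS] vector per candidate sequence
+         logits = self.scorer(self.dropout(cls_states)).squeeze(-1)  # [batch, num_choices]
+         if labels is None:
+             return logits
+         return F.cross_entropy(logits, labels)  # labels: index of the correct candidate
+ ```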
346
+
347
+ # A.4 HYPERPARAMETERS
348
+
349
+ Hyperparameters for downstream tasks are shown in Table 14. We adapt these hyperparameters from Liu et al. (2019), Devlin et al. (2019), and Yang et al. (2019).
350
+
351
+ <table><tr><td></td><td>LR</td><td>BSZ</td><td>ALBERT DR</td><td>Classifier DR</td><td>TS</td><td>WS</td><td>MSL</td></tr><tr><td>CoLA</td><td>1.00E-05</td><td>16</td><td>0</td><td>0.1</td><td>5336</td><td>320</td><td>512</td></tr><tr><td>STS</td><td>2.00E-05</td><td>16</td><td>0</td><td>0.1</td><td>3598</td><td>214</td><td>512</td></tr><tr><td>SST-2</td><td>1.00E-05</td><td>32</td><td>0</td><td>0.1</td><td>20935</td><td>1256</td><td>512</td></tr><tr><td>MNLI</td><td>3.00E-05</td><td>128</td><td>0</td><td>0.1</td><td>10000</td><td>1000</td><td>512</td></tr><tr><td>QNLI</td><td>1.00E-05</td><td>32</td><td>0</td><td>0.1</td><td>33112</td><td>1986</td><td>512</td></tr><tr><td>QQP</td><td>5.00E-05</td><td>128</td><td>0.1</td><td>0.1</td><td>14000</td><td>1000</td><td>512</td></tr><tr><td>RTE</td><td>3.00E-05</td><td>32</td><td>0.1</td><td>0.1</td><td>800</td><td>200</td><td>512</td></tr><tr><td>MRPC</td><td>2.00E-05</td><td>32</td><td>0</td><td>0.1</td><td>800</td><td>200</td><td>512</td></tr><tr><td>WNLI</td><td>2.00E-05</td><td>16</td><td>0.1</td><td>0.1</td><td>2000</td><td>250</td><td>512</td></tr><tr><td>SQuAD v1.1</td><td>5.00E-05</td><td>48</td><td>0</td><td>0.1</td><td>3649</td><td>365</td><td>384</td></tr><tr><td>SQuAD v2.0</td><td>3.00E-05</td><td>48</td><td>0</td><td>0.1</td><td>8144</td><td>814</td><td>512</td></tr><tr><td>RACE</td><td>2.00E-05</td><td>32</td><td>0.1</td><td>0.1</td><td>12000</td><td>1000</td><td>512</td></tr></table>
352
+
353
+ Table 14: Hyperparameters for ALBERT in downstream tasks. LR: Learning Rate. BSZ: Batch Size. DR: Dropout Rate. TS: Training Steps. WS: Warmup Steps. MSL: Maximum Sequence Length.
albertalitebertforselfsupervisedlearningoflanguagerepresentations/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dea600be19c7091584e068945aef779f1933eec7f5915d9803655b8b3444bd28
3
+ size 636768
albertalitebertforselfsupervisedlearningoflanguagerepresentations/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81b80570e58360af41f9b6f93a1f5454b5f61d8b514003ba826ca224a1249408
3
+ size 446473
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fbe94d892481ad79eba87a3fe861b00e7dad7802998676c5fcefb29a729c5f7f
3
+ size 75490
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:863deb040ee14dc3ea49c5966b22ce513d0a7b5922317bbb2819a9f1af87bbb8
3
+ size 90911
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/d375015a-3459-4918-8a50-7e23cf8bf217_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:885784d1e4aa6e37ce56986cad6fecfd8f8bc303a89de85a50a1cbcdfa5cc2cc
3
+ size 545090
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/full.md ADDED
@@ -0,0 +1,268 @@
 
 
 
 
1
+ # A MUTUAL INFORMATION MAXIMIZATION PERSPECTIVE OF LANGUAGE REPRESENTATION LEARNING
2
+
3
+ Lingpeng Kong\*, Cyprien de Masson d'Autume\*, Wang Ling\*, Lei Yu\*, Zihang Dai\*, Dani Yogatama\*
4
+
5
+ DeepMind\*, Carnegie Mellon University, Google Brain
6
+
7
+ London, United Kingdom
8
+
9
+ {lingpenk, cyprien, lingwang, leiyu, zihangd, dyogatama}@google.com
10
+
11
+ # ABSTRACT
12
+
13
+ We show state-of-the-art word representation learning methods maximize an objective function that is a lower bound on the mutual information between different parts of a word sequence (i.e., a sentence). Our formulation provides an alternative perspective that unifies classical word embedding models (e.g., Skip-gram) and modern contextual embeddings (e.g., BERT, XLNet). In addition to enhancing our theoretical understanding of these methods, our derivation leads to a principled framework that can be used to construct new self-supervised tasks. We provide an example by drawing inspirations from related methods based on mutual information maximization that have been successful in computer vision, and introduce a simple self-supervised objective that maximizes the mutual information between a global sentence representation and $n$ -grams in the sentence. Our analysis offers a holistic view of representation learning methods to transfer knowledge and translate progress across multiple domains (e.g., natural language processing, computer vision, audio processing).
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Advances in representation learning have driven progress in natural language processing. Performance on many downstream tasks has improved considerably, achieving parity with human baselines in benchmark leaderboards such as SQuAD (Rajpurkar et al., 2016; 2018) and GLUE (Wang et al., 2019). The main ingredient is the "pretrain and fine-tune" approach, where a large text encoder is trained on an unlabeled corpus with self-supervised training objectives and used to initialize a task-specific model. Such an approach has also been shown to reduce the number of training examples needed to achieve good performance on the task of interest (Yogatama et al., 2019).
18
+
19
+ In contrast to first-generation models that learn word type embeddings (Mikolov et al., 2013; Pennington et al., 2014), recent methods have focused on contextual token representations—i.e., learning an encoder to represent words in context. Many of these encoders are trained with a language modeling objective, where the representation of a context is trained to be predictive of a target token by maximizing the log likelihood of predicting this token (Dai & Le, 2015; Howard & Ruder, 2018; Radford et al., 2018; 2019). In a vanilla language modeling objective, the target token is always the next token that follows the context. Peters et al. (2018) propose an improvement by adding a reverse objective that also predicts the word token that precedes the context. Following this trend, current state-of-the-art encoders such as BERT (Devlin et al., 2018) and XLNet (Yang et al., 2019) are also trained with variants of the language modeling objective: masked language modeling and permutation language modeling.
20
+
21
+ In this paper, we provide an alternative view and show that these methods also maximize a lower bound on the mutual information between different parts of a word sequence. Such a framework is inspired by the InfoMax principle (Linsker, 1988) and has been the main driver of progress in self-supervised representation learning in other domains such as computer vision, audio processing, and reinforcement learning (Belghazi et al., 2018; van den Oord et al., 2019; Hjelm et al., 2019;
22
+
23
+ Bachman et al., 2019; O'Connor & Veeling, 2019). Many of these methods are trained to maximize a particular lower bound called InfoNCE (van den Oord et al., 2019)—also known as contrastive learning (Arora et al., 2019). The main idea behind contrastive learning is to divide an input into multiple (possibly overlapping) views and maximize the mutual information between encoded representations of these views, using views derived from other inputs as negative samples. In §2, we provide an overview of representation learning with mutual information maximization. We then show how the skip-gram objective (§3.1; Mikolov et al. 2013), masked language modeling (§3.2; Devlin et al. 2018), and permutation language modeling (§3.3; Yang et al. 2019) fit in this framework.
24
+
25
+ In addition to providing a principled theoretical understanding that bridges progress in multiple areas, our proposed framework also gives rise to a general class of word representation learning models which serves as a basis for designing and combining self-supervised training objectives to create better language representations. As an example, we show how to use this framework to construct a simple self-supervised objective that maximizes the mutual information between a sentence and $n$ -grams in the sentence ( $\S 4$ ). We combine it with a variant of the masked language modeling objective and show that the resulting representation performs better, particularly on tasks such as question answering and linguistic acceptability ( $\S 5$ ).
26
+
27
+ # 2 MUTUAL INFORMATION MAXIMIZATION
28
+
29
+ Mutual information measures dependencies between random variables. Given two random variables $A$ and $B$ , it can be understood as how much knowing $A$ reduces the uncertainty in $B$ or vice versa. Formally, the mutual information between $A$ and $B$ is:
30
+
31
+ $$
32
+ I (A, B) = H (A) - H (A \mid B) = H (B) - H (B \mid A).
33
+ $$
34
+
35
+ Consider $A$ and $B$ to be different views of an input (e.g., a word and its context, two different partitions of a sentence). Consider a function $f$ that takes $A = a$ and $B = b$ as its input. The goal of training is to learn parameters of the function $f$ that maximize $I(A, B)$ .
36
+
37
+ Maximizing mutual information directly is generally intractable when the function $f$ consists of modern encoders such as neural networks (Paninski, 2003), so we need to resort to a lower bound on $I(A,B)$ . One particular lower bound that has been shown to work well in practice is InfoNCE (Logeswaran & Lee, 2018; van den Oord et al., 2019), which is based on Noise Contrastive Estimation (NCE; Gutmann & Hyvarinen, 2012). InfoNCE is defined as:
38
+
39
+ $$
40
+ I (A, B) \geq \mathbb {E} _ {p (A, B)} \left[ f _ {\boldsymbol {\theta}} (a, b) - \mathbb {E} _ {q (\tilde {\mathcal {B}})} \left[ \log \sum_ {\tilde {b} \in \tilde {\mathcal {B}}} \exp f _ {\boldsymbol {\theta}} (a, \tilde {b}) \right] \right] + \log | \tilde {\mathcal {B}} |, \tag {1}
41
+ $$
42
+
43
+ where $a$ and $b$ are different views of an input sequence, $f_{\theta} \in \mathbb{R}$ is a function parameterized by $\pmb{\theta}$ (e.g., a dot product between encoded representations of a word and its context, a dot product between encoded representations of two partitions of a sentence), and $\tilde{\mathcal{B}}$ is a set of samples drawn from a proposal distribution $q(\tilde{\mathcal{B}})$ . The set $\tilde{\mathcal{B}}$ contains the positive sample $b$ and $|\tilde{\mathcal{B}}| - 1$ negative samples.
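+ As a concrete illustration (ours, not part of the original formulation), the sketch below estimates the InfoNCE bound in Eq. 1 with in-batch negative samples: row $k$ of one view is the positive for row $k$ of the other, and the remaining rows in the batch play the role of $\tilde{\mathcal{B}}$ .
+
+ ```python
+ import math
+ import torch
+ import torch.nn.functional as F
+
+ def info_nce(a_repr, b_repr):
+     """In-batch InfoNCE estimate; a_repr, b_repr are [batch, dim] encodings of the two views."""
+     scores = a_repr @ b_repr.t()                          # f_theta(a, b~) for every candidate pair
+     labels = torch.arange(a_repr.size(0), device=a_repr.device)
+     # cross-entropy = -(positive score - log-sum-exp over the candidate set), averaged over the batch
+     return -F.cross_entropy(scores, labels) + math.log(a_repr.size(0))
+ ```
+
+ Training maximizes this estimate, which is equivalent to minimizing the cross-entropy term; the $\log |\tilde{\mathcal{B}}|$ offset does not affect the gradients.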
44
+
45
+ Learning representations based on this objective is also known as contrastive learning. Arora et al. (2019) show representations learned by such a method have provable performance guarantees and reduce sample complexity on downstream tasks.
46
+
47
+ We note that InfoNCE is related to cross-entropy. When $\tilde{\mathcal{B}}$ always includes all possible values of the random variable $B$ (i.e., $\tilde{\mathcal{B}} = \mathcal{B}$ ) and they are uniformly distributed, maximizing InfoNCE is analogous to maximizing the standard cross-entropy loss:
48
+
49
+ $$
50
+ \mathbb {E} _ {p (A, B)} \left[ f _ {\boldsymbol {\theta}} (a, b) - \log \sum_ {\tilde {b} \in \mathcal {B}} \exp f _ {\boldsymbol {\theta}} (a, \tilde {b}) \right]. \tag {2}
51
+ $$
52
+
53
+ Eq. 2 above shows that InfoNCE is related to maximizing $p_{\theta}(b \mid a)$ , and it approximates the summation over elements in $\mathcal{B}$ (i.e., the partition function) by negative sampling. As a function of the negative samples, the InfoNCE bound is tighter when $\widetilde{\mathcal{B}}$ contains more samples (as can be seen in Eq. 1 above by inspecting the log $|\widetilde{\mathcal{B}}|$ term). Approximating a softmax over a large vocabulary with negative samples is a popular technique that has been widely used in natural language processing in the past. We discuss it here to make the connection under this framework clear.
54
+
55
+ # 3 MODELS
56
+
57
+ We describe how Skip-gram, BERT, and XLNet fit into the mutual information maximization framework as instances of InfoNCE. In the following, we assume that $f_{\theta}(a,b) = g_{\psi}(b)^{\top}g_{\omega}(a)$ , where $\pmb{\theta} = \{\pmb{\omega},\pmb{\psi}\}$ . Denote the vocabulary set by $\mathcal{V}$ and the size of the vocabulary by $V$ . For word representation learning, we seek to learn an encoder parameterized by $\pmb{\omega}$ to represent each word in a sequence $\pmb{x} = \{x_1,x_2,\dots ,x_T\}$ in $d$ dimensions. For each of the models we consider in this paper, $a$ and $b$ are formed by taking different parts of $x$ (e.g., $a := x_1$ and $b := x_T$ ).
58
+
59
+ # 3.1 SKIP-GRAM
60
+
61
+ We first start with a simple word representation learning model Skip-gram (Mikolov et al., 2013). Skip-gram is a method for learning word representations that relies on the assumption that a good representation of a word should be predictive of its context. The objective function that is maximized in Skip-gram is: $\mathbb{E}_{p(x_i, x_j^i)}[p(x_j^i \mid x_i)]$ , where $x_i$ is a word token and $x_j^i$ is a context word of $x_i$ .
62
+
63
+ Let $b$ be the context word to be predicted, $x_{j}^{i}$ , and $a$ be the input word $x_{i}$ . Recall that $f_{\theta}(a,b)$ is $g_{\psi}(b)^{\top}g_{\omega}(a)$ . The skip-gram objective function can be written as an instance of InfoNCE (Eq. 1) where $g_{\psi}$ and $g_{\omega}$ are embedding lookup functions that map each word type to $\mathbb{R}^d$ (i.e., $g_{\psi}, g_{\omega}: \mathcal{V} \to \mathbb{R}^d$ ).
64
+
65
+ $p(x_{j}^{i}\mid x_{i})$ can either be computed using a standard softmax over the entire vocabulary or with negative sampling (when the vocabulary is very large). These two approaches correspond to different choices of $\tilde{\mathcal{B}}$ . In the softmax approach, $\tilde{\mathcal{B}}$ is the full vocabulary set $\mathcal{V}$ and each word in $\mathcal{V}$ is uniformly distributed. In negative sampling, $\tilde{\mathcal{B}}$ is a set of negative samples drawn from e.g., a unigram distribution.
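+ The sketch below (ours, not the original word2vec implementation) writes Skip-gram with negative sampling in exactly this candidate-set form: both $g_{\omega}$ and $g_{\psi}$ are embedding lookups, and $\tilde{\mathcal{B}}$ consists of the observed context word plus $K$ negatives drawn from a unigram proposal. Note that word2vec itself optimizes a binary-logistic variant of NCE rather than this softmax-over-candidates form.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class SkipGramNCE(nn.Module):
+     def __init__(self, vocab_size, dim):
+         super().__init__()
+         self.g_omega = nn.Embedding(vocab_size, dim)  # input-word lookup (the representations we keep)
+         self.g_psi = nn.Embedding(vocab_size, dim)    # context-word ("output") lookup
+
+     def forward(self, word_ids, context_ids, negative_ids):
+         # word_ids: [B], context_ids: [B], negative_ids: [B, K] sampled from a unigram distribution
+         a = self.g_omega(word_ids)                                    # [B, d]
+         candidates = torch.cat([self.g_psi(context_ids).unsqueeze(1),
+                                 self.g_psi(negative_ids)], dim=1)     # [B, 1 + K, d]
+         scores = torch.einsum("bd,bkd->bk", a, candidates)            # f_theta(a, b~) per candidate
+         labels = torch.zeros(word_ids.size(0), dtype=torch.long, device=word_ids.device)
+         return F.cross_entropy(scores, labels)                        # the positive sits at index 0
+ ```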
66
+
67
+ While Skip-gram has been widely accepted as an instance of contrastive learning (Mikolov et al., 2013; Mnih & Kavukcuoglu, 2013), we include it here to illustrate its connection with modern approaches such as BERT and XLNet described subsequently. We can see that the two views of an input sentence that are considered in Skip-gram are two words that appear in the same sentence, and they are encoded using simple lookup functions.
68
+
69
+ # 3.2 BERT
70
+
71
+ Devlin et al. (2018) introduce two self-supervised tasks for learning contextual word representations: masked language modeling and next sentence prediction. Previous work suggests that the next sentence prediction objective is not necessary to train a high-quality BERT encoder and that masked language modeling appears to be the key to learning good representations (Liu et al., 2019; Joshi et al., 2019; Lample & Conneau, 2019), so we focus on masked language modeling here. However, we also show how next sentence prediction fits into our framework in Appendix A.
72
+
73
+ In masked language modeling, given a sequence of word tokens of length $T$ , $\pmb{x} = \{x_{1},\dots,x_{T}\}$ , BERT replaces $15\%$ of the tokens in the sequence with (i) a mask symbol $80\%$ of the time, (ii) a random word $10\%$ of the time, or (iii) the original word the remaining $10\%$ of the time. For each replaced token, it introduces a term in the masked language modeling training objective to predict the original word given the perturbed sequence $\hat{\pmb{x}}_i = \{x_1,\dots,\hat{x}_i,\dots,x_T\}$ (i.e., the sequence $\pmb{x}$ masked at $x_{i}$ ). This training objective can be written as: $\mathbb{E}_{p(x_i,\hat{x}_i)}[p(x_i\mid \hat{x}_i)]$ .
74
+
75
+ Following our notation in §2, we have $f_{\theta}(a,b) = g_{\psi}(b)^{\top}g_{\omega}(a)$ . Let $b$ be a masked word $x_{i}$ and $a$ be the masked sequence $\hat{\pmb{x}}_i$ . Consider a Transformer encoder parameterized by $\omega$ and denote $g_{\omega}(\hat{\pmb{x}}_i)\in \mathbb{R}^d$ as a function that returns the final hidden state corresponding to the $i$ -th token (i.e., the masked token) after running $\hat{\pmb{x}}_i$ through the Transformer. Let $g_{\psi}:\mathcal{V}\to \mathbb{R}^{d}$ be a lookup function that maps each word type into a vector and $\tilde{\mathcal{B}} = \mathcal{B}$ be the full vocabulary set $\mathcal{V}$ . Under this formulation, the masked language modeling objective maximizes Eq. 1 and different choices of masking probabilities can be understood as manipulating the joint distributions $p(a,b)$ . In BERT, the two views of a sentence correspond to a masked word in the sentence and its masked context.
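+ A minimal sketch of this reading of masked language modeling (illustrative only): here `encoder` is any module returning per-token hidden states and `output_embedding` is an embedding table playing the role of $g_{\psi}$ ; neither name refers to an actual implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def mlm_infonce_loss(encoder, output_embedding, masked_ids, positions, targets):
+     """Masked LM term of Eq. 1 with the full vocabulary as the candidate set B~."""
+     # masked_ids: [B, T] token ids after masking; positions: [B] index of the masked token i
+     # targets:    [B] original token x_i at the masked position
+     hidden = encoder(masked_ids)                               # [B, T, d] contextual states, g_omega
+     g_a = hidden[torch.arange(hidden.size(0)), positions]      # [B, d] state at the masked position
+     scores = g_a @ output_embedding.weight.t()                 # [B, |V|] dot products with g_psi
+     return F.cross_entropy(scores, targets)                    # negative of the InfoNCE term, up to a constant
+ ```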
76
+
77
+ Contextual vs. non-contextual. It is generally understood that the main difference between Skip-gram and BERT is that Skip-gram learns representations of word types (i.e., the representation for a word is always the same regardless of the context it appears in) and BERT learns representations of word tokens. We note that under our formulation for either Skip-gram or BERT, the encoder that we want to learn appears in $g_{\omega}$ , and $g_{\psi}$ is not used after training. We show that Skip-gram and BERT maximize a similar objective, and the main difference is in the choice of the encoder that forms $g_{\omega}$ —a context-dependent Transformer encoder that takes a sequence as its input for BERT and a simple word embedding lookup for Skip-gram.
78
+
79
+ # 3.3 XLNET
80
+
81
+ Yang et al. (2019) propose a permutation language modeling objective to learn contextual word representations. This objective considers all possible factorization permutations of a joint distribution of a sentence. Given a sentence $\pmb{x} = \{x_{1},\dots,x_{T}\}$ , there are $T!$ ways to factorize its joint distribution. Denote a permutation by $z\in \mathcal{Z}$ . XLNet optimizes the objective function:
82
+
83
+ $$
84
+ \mathbb {E} _ {p (\boldsymbol {x})} \left[ \mathbb {E} _ {p (\boldsymbol {z})} \left[ \sum_ {t = 1} ^ {T} \log p (x _ {t} ^ {\boldsymbol {z}} \mid \boldsymbol {x} _ {< t} ^ {\boldsymbol {z}}) \right] \right].
85
+ $$
86
+
87
+ As a running example, consider a permutation order $3, 1, 5, 2, 4$ for a sentence $x_{1}, x_{2}, x_{3}, x_{4}, x_{5}$ . Given the order, XLNet is only trained to predict the last $S$ tokens in practice. For $S = 1$ , the context sequence used for training is $x_{1}, x_{2}, x_{3}, x_{5}$ , with $x_{4}$ being the target word.
88
+
89
+ In addition to replacing the Transformer encoder with Transformer XL (Dai et al., 2019), a key architectural innovation of XLNet is the two-stream self-attention. In two-stream self-attention, a shared encoder is used to compute two sets of hidden representations from one original sequence. They are called the query stream and the content stream. In the query stream, the input sequence is masked at the target position, whereas the content stream sees the word at the target position. Words at future positions for the permutation order under consideration are also masked in both streams. These masks are implemented as two attention mask matrices. During training, the final hidden representation for a target position from the query stream is used to predict the target word.
90
+
91
+ Since there is only one set of encoder parameters for both streams, we show that we can arrive at the permutation language modeling objective from the masked language modeling objective with an architectural change in the encoder. Denote a hidden representation by $\mathbf{h}_t^k$ , where $t$ indexes the position and $k$ indexes the layer, and consider the training sequence $x_{1}, x_{2}, x_{3}, \ldots, x_{5}$ and the permutation order $3, 1, 5, 2, 4$ . In BERT, we compute attention scores to obtain $\mathbf{h}_t^k$ from $\mathbf{h}_t^{k-1}$ for every $t$ (i.e., $t = 1, \ldots, T$ ), where $\mathbf{h}_4^0$ is the embedding for the mask symbol. In XLNet, the attention scores for future words in the permutation order are masked to 0. For example, when we compute $\mathbf{h}_1^k$ , only the attention score from $\mathbf{h}_3^{k-1}$ is considered (since the permutation order is $3, 1, 5, 2, 4$ ). For $\mathbf{h}_5^k$ , we use $\mathbf{h}_1^{k-1}$ and $\mathbf{h}_3^{k-1}$ . XLNet does not require a mask symbol embedding since the attention score from a masked token is always zeroed out with an attention mask (implemented as a matrix). As a result, we can consider XLNet training as masked language modeling with stochastic attention masks in the encoder.
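+ To make the "stochastic attention mask" reading concrete, the sketch below (ours, not the XLNet implementation) builds content- and query-stream masks from a sampled permutation order; with the running example $3, 1, 5, 2, 4$ , position 1 may attend only to position 3, and position 5 may attend to positions 1 and 3, as described above.
+
+ ```python
+ import numpy as np
+
+ def permutation_attention_masks(perm):
+     """perm: permutation over 1-indexed positions, e.g. [3, 1, 5, 2, 4].
+     Returns boolean [T, T] matrices; entry (i, j) is True if position i may attend to position j."""
+     T = len(perm)
+     rank = {pos: r for r, pos in enumerate(perm)}        # position -> its order in the permutation
+     content = np.zeros((T, T), dtype=bool)
+     query = np.zeros((T, T), dtype=bool)
+     for i in range(1, T + 1):
+         for j in range(1, T + 1):
+             earlier = rank[j] < rank[i]                   # j precedes i in the permutation
+             content[i - 1, j - 1] = earlier or i == j     # content stream also sees the token itself
+             query[i - 1, j - 1] = earlier                 # query stream sees only the masked context
+     return content, query
+
+ content, query = permutation_attention_masks([3, 1, 5, 2, 4])
+ assert query[0].nonzero()[0].tolist() == [2]              # position 1 attends only to position 3
+ assert query[4].nonzero()[0].tolist() == [0, 2]           # position 5 attends to positions 1 and 3
+ ```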
92
+
93
+ It is now straightforward to see that the permutation language modeling objective is an instance of Eq.1, where $b$ is a target token $x_{i}$ and $a$ is a masked sequence $\hat{\pmb{x}}_i = \{x_1,\dots ,\hat{x}_i,\dots ,x_T\}$ . Similar to
94
+
95
+ Table 1: Summary of methods as instances of contrastive learning. See text for details.
96
+
97
+ <table><tr><td>Objective</td><td>a</td><td>b</td><td>p(a,b)</td><td>gω</td><td>gψ</td></tr><tr><td>Skip-gram</td><td>word</td><td>word</td><td>word and its context</td><td>lookup</td><td>lookup</td></tr><tr><td>MLM</td><td>context</td><td>masked word</td><td>masked tokens probability</td><td>Transformer</td><td>lookup</td></tr><tr><td>NSP</td><td>sentence</td><td>sentence</td><td>(non-)consecutive sentences</td><td>Transformer</td><td>lookup</td></tr><tr><td>XLNet</td><td>context</td><td>masked word</td><td>factorization permutation</td><td>TXL++</td><td>lookup</td></tr><tr><td>DIM</td><td>context</td><td>masked n-grams</td><td>sentence and its n-grams</td><td>Transformer</td><td>not used</td></tr></table>
98
+
99
+ BERT, we have a Transformer encoder parameterized by $\omega$ and denote $g_{\omega}(\hat{x}_i) \in \mathbb{R}^d$ as a function that returns the final hidden state corresponding to the $i$ -th token (i.e., the masked token) after running $\hat{x}_i$ through the Transformer. Let $g_{\psi}: \mathcal{V} \to \mathbb{R}^d$ be a lookup function that maps each word type into a vector and $\tilde{\mathcal{B}} = \mathcal{B}$ be the full vocabulary set $\mathcal{V}$ . The main difference between BERT and XLNet is that the encoder that forms $g_{\omega}$ used in XLNet implements attention masking based on a sampled permutation order when building its representations. In addition, XLNet and BERT also differ in the choice of $p(a, b)$ since each of them has its own masking procedure. However, we can see that both XLNet and BERT maximize the same objective.
100
+
101
+ # 4 INFOWORD
102
+
103
+ Our analysis on Skip-Gram, BERT, and XLNet shows that their objective functions are different instances of InfoNCE in Eq.1, although they are typically trained using the entire vocabulary set for $\tilde{\mathcal{B}}$ instead of negative sampling. These methods differ in how they choose which views of a sentence they use as $a$ and $b$ , the data distribution $p(a,b)$ , and the architecture of the encoder for computing $g_{\omega}$ which we summarize in Table 1. Seen under this unifying framework, we can observe that progress in the field has largely been driven by using a more powerful encoder to represent $g_{\omega}$ . While we only provide derivations for Skip-gram, BERT, and XLNet, it is straightforward to show that other language-modeling-based pretraining-objectives such as those used in ELMo (Peters et al., 2018) and GPT-2 (Radford et al., 2019) can be formulated under this framework.
104
+
105
+ Our framework also allows us to draw connections to other mutual information maximization representation learning methods that have been successful in other domains (e.g., computer vision, audio processing, reinforcement learning). In this section, we discuss an example and derive insights to design a simple self-supervised objective for learning better language representations.
106
+
107
+ Deep InfoMax (DIM; Hjelm et al., 2019) is a mutual information maximization based representation learning method for images. DIM shows that maximizing the mutual information between an image representation and local regions of the image improves the quality of the representation. The complete objective function that DIM maximizes consists of multiple terms. Here, we focus on a term in the objective that maximizes the mutual information between local features and global features. We describe the main idea of this objective for learning representations from a one-dimensional sequence, although it is originally proposed to learn from a two-dimensional object.
108
+
109
+ Given a sequence $\pmb{x} = \{x_{1}, x_{2}, \dots, x_{T}\}$ , we consider the "global" representation of the sequence to be the hidden state of the first token (assumed to be a special start of sentence symbol) after contextually encoding the sequence $g_{\omega}(\pmb{x})$ , and the local representations to be the encoded representations of each word in the sequence $g_{\psi}(x_{t})$ . We can use the contrastive learning framework to design a task that maximizes the mutual information between this global representation vector and its corresponding "local" representations using local representations from other sequences $g_{\psi}(\hat{x}_{t})$ as negative samples. This is analogous to training the global representation vector of a sentence to choose which words appear in the sentence and which words are from other sentences. However, if we feed the original sequence $\pmb{x}$ to the encoder and take the hidden state of the first token as the global
110
+
111
+ representation, the task becomes trivial since the global representation is built using all the words in the sequence. We instead use a masked sequence $a \coloneqq \hat{\pmb{x}}_t = \{x_1, \dots, \hat{x}_t, \dots, x_T\}$ and $b \coloneqq x_t$ .
112
+
113
+ State-of-the-art methods based on language modeling objectives consider all negative samples since the second view of the input data (i.e., the part denoted by $b$ in Eq. 1) is simple and consists of only a target word—hence the size of the negative set is still manageable. A major benefit of the contrastive learning framework is that we only need to be able to take negative samples for training. Instead of individual words, we can use $n$ -grams as the local representations. Denote an $n$ -gram by $x_{i:j}$ and a sequence masked at positions $i$ to $j$ by $\hat{x}_{i:j}$ . We define $\mathcal{I}_{\mathrm{DIM}}$ as:
114
+
115
+ $$
116
+ \mathcal{I}_{\mathrm{DIM}} = \mathbb{E}_{p(\hat{\boldsymbol{x}}_{i:j}, \boldsymbol{x}_{i:j})} \left[ g_{\boldsymbol{\omega}}(\hat{\boldsymbol{x}}_{i:j})^{\top} g_{\boldsymbol{\omega}}(\boldsymbol{x}_{i:j}) - \log \sum_{\tilde{\boldsymbol{x}}_{i:j} \in \tilde{\mathcal{S}}} \exp \left( g_{\boldsymbol{\omega}}(\hat{\boldsymbol{x}}_{i:j})^{\top} g_{\boldsymbol{\omega}}(\tilde{\boldsymbol{x}}_{i:j}) \right) \right],
117
+ $$
118
+
119
+ where $\hat{\pmb{x}}_{i:j}$ is a sentence masked at positions $i$ to $j$ , $\pmb{x}_{i:j}$ is an $n$ -gram spanning from $i$ to $j$ , and $\tilde{\pmb{x}}_{i:j}$ is an $n$ -gram from a set $\tilde{\mathcal{S}}$ that consists of the positive sample $\pmb{x}_{i:j}$ and negative $n$ -grams from other sentences in the corpus. We use one Transformer to encode both views, so we do not need $g_{\psi}$ here.
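+ With in-batch negatives (the setting we use for pretraining, §5.2), $\mathcal{I}_{\mathrm{DIM}}$ reduces to a standard contrastive cross-entropy. A minimal sketch follows (tensor names are ours): `global_repr[k]` is $g_{\omega}$ of the $k$ -th masked sentence and `ngram_repr[k]` is $g_{\omega}$ of the $n$ -gram masked out of it, with the other rows of the batch serving as negatives.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def dim_loss(global_repr, ngram_repr):
+     # global_repr, ngram_repr: [batch, dim], both produced by the same Transformer g_omega
+     scores = global_repr @ ngram_repr.t()                         # [batch, batch] dot products
+     labels = torch.arange(global_repr.size(0), device=global_repr.device)
+     return F.cross_entropy(scores, labels)                        # -I_DIM up to a constant
+ ```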
120
+
121
+ Since the main goal of representation learning is to train an encoder parameterized by $\omega$ , it is possible to combine multiple self-supervised tasks into an objective function in the contrastive learning framework. Our model, which we denote INFOWORD, combines the above objective—which is designed to improve sentence and span representations—with a masked language modeling objective $\mathcal{I}_{\mathrm{MLM}}$ for learning word representations. The only difference between our masked language modeling objective and the standard masked language modeling objective is that we use negative sampling to construct $\tilde{\mathcal{V}}$ by sampling from the unigram distribution. We have:
122
+
123
+ $$
124
+ \mathcal {I} _ {\mathrm {M L M}} = \mathbb {E} _ {p (\hat {\boldsymbol {x}} _ {i}, \boldsymbol {x} _ {i})} \left[ g _ {\boldsymbol {\omega}} (\hat {\boldsymbol {x}} _ {i}) ^ {\top} g _ {\boldsymbol {\psi}} (\boldsymbol {x} _ {i}) - \log \sum_ {\tilde {\boldsymbol {x}} _ {i} \in \tilde {\mathcal {V}}} \exp (g _ {\boldsymbol {\omega}} (\hat {\boldsymbol {x}} _ {i}) ^ {\top} g _ {\boldsymbol {\psi}} (\tilde {\boldsymbol {x}} _ {i})) \right],
125
+ $$
126
+
127
+ where $\hat{x}_i$ is a sentence masked at position $i$ and $x_i$ is the $i$ -th token in the sentence.
128
+
129
+ Our overall objective function is a weighted combination of the two terms above:
130
+
131
+ $$
132
+ \mathcal{I}_{\mathrm{INFOWORD}} = \lambda_{\mathrm{MLM}} \mathcal{I}_{\mathrm{MLM}} + \lambda_{\mathrm{DIM}} \mathcal{I}_{\mathrm{DIM}},
133
+ $$
134
+
135
+ where $\lambda_{\mathrm{MLM}}$ and $\lambda_{\mathrm{DIM}}$ are hyperparameters that balance the contribution of each term.
136
+
137
+ # 5 EXPERIMENTS
138
+
139
+ In this section, we evaluate the effects of training masked language modeling with negative sampling and adding $\mathcal{I}_{\mathrm{DIM}}$ to the quality of learned representations.
140
+
141
+ # 5.1 SETUP
142
+
143
+ We largely follow the same experimental setup as the original BERT model (Devlin et al., 2018). We have two Transformer architectures similar to $\mathrm{BERT}_{\mathrm{BASE}}$ and $\mathrm{BERT}_{\mathrm{LARGE}}$ . $\mathrm{BERT}_{\mathrm{BASE}}$ has 12 hidden layers, 768 hidden dimensions, and 12 attention heads (110 million parameters); whereas $\mathrm{BERT}_{\mathrm{LARGE}}$ has 24 hidden layers, 1024 hidden dimensions, and 16 attention heads (340 million parameters).
144
+
145
+ For each of the Transformer variant above, we compare three models in our experiments:
146
+
147
+ - BERT: The original BERT model publicly available in https://github.com/google-research/bert.
148
+ - BERT-NCE: Our reimplementation of BERT. It differs from the original implementation in several ways: (1) we only use the masked language modeling objective and remove next sentence prediction, (2) we use negative sampling instead of softmax, and (3) we only use one sentence for each training example in a batch.
149
+
150
+ - INFOWORD: Our model described in §4. The main difference between INFOWORD and BERT-NCE is the addition of $\mathcal{I}_{\mathrm{DIM}}$ to the objective function. We discuss how we mask the data for $\mathcal{I}_{\mathrm{DIM}}$ in §5.2.
151
+
152
+ # 5.2 PRETRAINING
153
+
154
+ We use the same training corpora and apply the same preprocessing and tokenization as BERT. We create masked sequences for training with $\mathcal{I}_{\mathrm{DIM}}$ as follows. We iteratively sample $n$ -grams from a sequence until the masking budget ( $15\%$ of the sequence length) has been spent. At each sampling iteration, we first sample the length of the $n$ -gram (i.e., $n$ in $n$ -grams) from a Gaussian distribution $\mathcal{N}(5, 1)$ clipped at 1 (minimum length) and 10 (maximum length). Since BERT tokenizes words into subwords, we measure the $n$ -gram length at the word level and compute the masking budget at the subword level. This procedure is inspired by the masking approach in Joshi et al. (2019).
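+ One plausible reading of this sampling procedure as code (ours, not the exact implementation; tie-breaking and resampling details are assumptions):
+
+ ```python
+ import numpy as np
+
+ def sample_masked_spans(word_piece_counts, budget_ratio=0.15, rng=None):
+     """word_piece_counts: number of subword pieces for each word in the sequence.
+     Samples word-level n-gram spans with n ~ N(5, 1) clipped to [1, 10] until roughly
+     budget_ratio of the subwords are covered; returns the indices of the masked words."""
+     rng = rng or np.random.default_rng()
+     budget = int(budget_ratio * sum(word_piece_counts))
+     masked, spent = set(), 0
+     while spent < budget:
+         n = int(np.clip(round(rng.normal(5.0, 1.0)), 1, 10))       # span length, measured in words
+         start = int(rng.integers(0, len(word_piece_counts)))
+         for w in range(start, min(start + n, len(word_piece_counts))):
+             if w not in masked:
+                 masked.add(w)
+                 spent += word_piece_counts[w]                       # budget is counted in subwords
+     return masked
+ ```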
155
+
156
+ For negative sampling, we use words and $n$ -grams from other sequences in the same batch as negative samples (for MLM and DIM respectively). There are approximately 70,000 subwords and 10,000 $n$ -grams (words and phrases) in a batch. We discuss hyperparameter details in Appendix B.
157
+
158
+ # 5.3 FINE-TUNING
159
+
160
+ We evaluate on two benchmarks: GLUE (Wang et al., 2019) and SQuAD (Rajpurkar et al., 2016). We train a task-specific decoder and fine-tune pretrained models for each dataset that we consider. We describe hyperparameter details in Appendix B.
161
+
162
+ GLUE is a set of natural language understanding tasks that includes sentiment analysis, linguistic acceptability, paraphrasing, and natural language inference. Each task is formulated as a classification task. The tasks in GLUE are either a single-sentence classification task or a sentence pair classification task. We follow the same setup as the original BERT model and add a start of sentence symbol (i.e., the CLS symbol) to every example and use a separator symbol (i.e., the SEP symbol) to separate two concatenated sentences (for sentence pair classification tasks). We add a linear transformation and a softmax layer to predict the correct label (class) from the representation of the first token of the sequence.
163
+
164
+ SQuAD is a reading comprehension dataset constructed from Wikipedia articles. We report results on SQuAD 1.1. Here, we also follow the same setup as the original BERT model and predict an answer span—the start and end indices of the correct answer in the context. We use a standard span predictor as the decoder, which we describe in detail in Appendix C.
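+ The exact decoder is given in Appendix C; the sketch below shows the standard BERT-style span predictor it refers to (our illustration), which projects each token state to start and end logits and averages the two cross-entropy losses.
+
+ ```python
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class SpanPredictor(nn.Module):
+     """Extractive QA head: per-token start/end logits over the concatenated question+context."""
+     def __init__(self, hidden_size):
+         super().__init__()
+         self.proj = nn.Linear(hidden_size, 2)
+
+     def forward(self, token_states, start_positions=None, end_positions=None):
+         # token_states: [B, T, d] contextual representations from the pretrained encoder
+         start_logits, end_logits = self.proj(token_states).split(1, dim=-1)
+         start_logits, end_logits = start_logits.squeeze(-1), end_logits.squeeze(-1)
+         if start_positions is None:
+             return start_logits, end_logits                      # used for decoding the answer span
+         return 0.5 * (F.cross_entropy(start_logits, start_positions) +
+                       F.cross_entropy(end_logits, end_positions))
+ ```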
165
+
166
+ # 5.4 RESULTS
167
+
168
+ We show our main results in Table 2 and Table 3. Our BERT reimplementation with negative sampling underperforms the original BERT model on GLUE but is significantly better on SQuAD. However, we think that the main reasons for this performance discrepancy are the different masking procedures (we use span-based masking instead of whole-word masking) and the different ways training examples are presented to the model (we use one consecutive sequence instead of two sequences separated by the separator symbol). Comparing BERT-NCE and INFOWORD, we observe the benefit of the new self-supervised objective $\mathcal{I}_{\mathrm{DIM}}$ (better overall GLUE and SQuAD results), particularly on tasks such as question answering and linguistic acceptability that seem to require understanding of longer phrases. In order to better understand our model, we investigate its performance with varying numbers of training examples and different values of $\lambda_{\mathrm{DIM}}$ on the SQuAD development set and show the results in Figure 1 (for models with the BASE configuration). We can see that INFOWORD consistently outperforms BERT-NCE and the performance gap is biggest when the dataset is smallest, suggesting the benefit of having better pretrained representations when there are fewer training examples.
169
+
170
+ Table 2: Summary of results on GLUE.
171
+
172
+ <table><tr><td></td><td>Model</td><td>CoLA</td><td>SST-2</td><td>MRPC</td><td>QQP</td><td>MNLI (M/MM)</td><td>QNLI</td><td>RTE</td><td>GLUE AVG</td></tr><tr><td rowspan="3">BASE</td><td>BERT</td><td>52.1</td><td>93.5</td><td>88.9</td><td>71.2</td><td>84.6/83.4</td><td>90.5</td><td>66.4</td><td>78.8</td></tr><tr><td>BERT-NCE</td><td>50.8</td><td>93.0</td><td>88.6</td><td>70.5</td><td>83.2/83.0</td><td>90.9</td><td>65.9</td><td>78.2</td></tr><tr><td>INFOWORD</td><td>53.3</td><td>92.5</td><td>88.7</td><td>71.0</td><td>83.7/82.4</td><td>91.4</td><td>68.3</td><td>78.9</td></tr><tr><td rowspan="3">LARGE</td><td>BERT</td><td>60.5</td><td>94.9</td><td>89.3</td><td>72.1</td><td>86.7/85.9</td><td>92.7</td><td>70.1</td><td>81.5</td></tr><tr><td>BERT-NCE</td><td>54.7</td><td>93.1</td><td>89.5</td><td>71.2</td><td>85.8/85.0</td><td>92.7</td><td>72.5</td><td>80.6</td></tr><tr><td>INFOWORD</td><td>57.5</td><td>94.2</td><td>90.2</td><td>71.3</td><td>85.8/84.8</td><td>92.6</td><td>72.0</td><td>81.1</td></tr></table>
173
+
174
+ Table 3: Summary of results on SQuAD 1.1.
175
+
176
+ <table><tr><td rowspan="2" colspan="2">Model</td><td colspan="2">DEV</td><td colspan="2">TEST</td></tr><tr><td>F1</td><td>EM</td><td>F1</td><td>EM</td></tr><tr><td rowspan="3">BASE</td><td>BERT</td><td>88.5</td><td>80.8</td><td>-</td><td>-</td></tr><tr><td>BERT-NCE</td><td>90.2</td><td>83.3</td><td>90.9</td><td>84.4</td></tr><tr><td>INFOWORD</td><td>90.7</td><td>84.0</td><td>91.4</td><td>84.7</td></tr><tr><td rowspan="3">LARGE</td><td>BERT</td><td>90.9</td><td>84.1</td><td>91.3</td><td>84.3</td></tr><tr><td>BERT-NCE</td><td>92.0</td><td>85.9</td><td>92.7</td><td>86.6</td></tr><tr><td>INFOWORD</td><td>92.6</td><td>86.6</td><td>93.1</td><td>87.3</td></tr></table>
177
+
178
+ # 5.5 DISCUSSION
179
+
180
+ Span-based models. We show how to design a simple self-supervised task in the InfoNCE framework that improves downstream performance on several datasets. Learning language representations to predict contiguous masked tokens has been explored in other contexts, and the objective introduced in $\mathcal{I}_{\mathrm{DIM}}$ is related to span-based models such as SpanBERT (Joshi et al., 2019) and MASS (Song et al., 2019). While our experimental goal is to demonstrate the benefit of contrastive learning for constructing self-supervised tasks, we note that INFOWORD is simpler to train and shows a similar trend to SpanBERT in outperforming baseline models. We leave exhaustive comparisons to these methods to future work.
181
+
182
+ Mutual information maximization. A recent study has questioned whether the success of InfoNCE as an objective function is due to its property as a lower bound on mutual information and provides an alternative hypothesis based on metric learning (Tschannen et al., 2019). Regardless of the prevailing perspective, InfoNCE is widely accepted as a good representation learning objective, and formulating state-of-the-art language representation learning methods under this framework offers valuable insights that unifies many popular representation learning methods.
183
+
184
+ Regularization. Image representation learning methods often incorporate a regularization term in their objective functions to encourage learned representations to look like a prior distribution (Hjelm et al., 2019; Bachman et al., 2019). This is useful for incorporating prior knowledge into a representation learning model. For example, the DeepInfoMax model has a term in its objective that encourages the learned representation from the encoder to match a uniform prior. Regularization is not commonly used when learning language representations. Our analysis and the connection we draw to representation learning methods used in other domains provide an insight into possible ways to incorporate prior knowledge into language representation learning models.
185
+
186
+ Future directions. The InfoNCE framework provides a holistic way to view progress in language representation learning. The framework is very flexible and suggests several directions that can be explored to improve existing methods. We show that progress in the field has been largely driven by innovations in the encoder which forms $g_{\omega}$ . InfoNCE is based on maximizing the mutual information between different views of the input data, and it facilitates training on structured views as long as we can perform negative sampling (van den Oord et al., 2019; Bachman et al., 2019). Our analysis demonstrates that existing methods based on language modeling objectives only consider a single target word as one of the views. We think that incorporating more complex views (e.g., higher-order or skip $n$ -grams, syntactic and semantic parses, etc.) and designing appropriate self-supervised tasks
187
+
188
+ ![](images/6ecc71334866f92d0e1238e0b57faedd1436b394a026ae99a3530085313f278a.jpg)
189
+ Figure 1: The left plot shows $F_{1}$ scores of BERT-NCE and INFOWORD as we increase the percentage of training examples on SQuAD (dev). The right plot shows $F_{1}$ scores of INFOWORD on SQuAD (dev) as a function of $\lambda_{\mathrm{DIM}}$ .
190
+
191
+ ![](images/8bf4b5b9e553f8da2cd450a9f887d3753ac6c6b079e38a910313b659b1c57e7b.jpg)
192
+
193
+ is a promising future direction. A related area that is also underexplored is designing methods to obtain better negative samples.
194
+
195
+ # 6 CONCLUSION
196
+
197
+ We analyzed state-of-the-art language representation learning methods from the perspective of mutual information maximization. We provided a unifying view of classical and modern word embedding models and showed how they relate to popular representation learning methods used in other domains. We used this framework to construct a new self-supervised task based on maximizing the mutual information between the global representation and local representations of a sentence. We demonstrated the benefit of this new task via experiments on GLUE and SQuAD.
198
+
199
+ # REFERENCES
200
+
201
+ Sanjeev Arora, Hrishikesh Khandeparkar, Mikhail Khodak, Orestis Plevrakis, and Nikunj Saunshi. A theoretical analysis of contrastive unsupervised representation learning. In Proc. of ICML, 2019.
202
+ Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. arXiv preprint 1906.00910, 2019.
203
+ Mohamed Ishmael Belghazi, Aristide Baratin, Sai Rajeswar, Sherjil Ozair, Yoshua Bengio, Aaron Courville, and R Devon Hjelm. Mine: Mutual information neural estimation. In Proc. of ICML, 2018.
204
+ Andrew M. Dai and Quoc V. Le. Semi-supervised sequence learning. In Proc. of NIPS, 2015.
205
+ Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, and Ruslan Salakhutdinov. Transformer-XL: Attentive language models beyond a fixed-length context. In Proc. of ACL, 2019.
206
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proc. of NAACL, 2018.
207
+ M. D. Donsker and S. R. S. Varadhan. Asymptotic evaluation of certain Markov process expectations for large time. IV. Communications on Pure and Applied Mathematics, 36(2):183-212, 1983.
208
+ Michael U. Gutmann and Aapo Hyvarinen. Noise-contrastive estimation of unnormalized statistical models, with applications to natural image statistics. Journal of Machine Learning Research, 13: 307-361, 2012.
209
+ R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. In Proc. of ICLR, 2019.
210
+ Jeremy Howard and Sebastian Ruder. Universal language model fine-tuning for text classification. In Proc. of ACL, 2018.
211
+
212
+ Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. Span-BERT: Improving pre-training by representing and predicting spans. arXiv preprint 1907.10529, 2019.
213
+ Diederik P. Kingma and Jimmy Lei Ba. Adam: a method for stochastic optimization. In Proc. of ICLR, 2015.
214
+ Guillaume Lample and Alexis Conneau. Cross-lingual language model pretraining. arXiv preprint 1901.07291, 2019.
215
+ Ralph Linsker. Self-organization in a perceptual network. Computer, 21(3):105-117, 1988.
216
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized bert pretraining approach. arXiv preprint 1907.11692, 2019.
217
+ Lajanugen Logeswaran and Honglak Lee. An efficient framework for learning sentence representations. In Proc. of ICLR, 2018.
218
+ Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed representations of words and phrases and their compositionality. In Proc. of NIPS, 2013.
219
+ Andriy Mnih and Koray Kavukcuoglu. Learning word embeddings efficiently with noise-contrastive estimation. In Proc. of NIPS, 2013.
220
+ Sebastian Nowozin, Botond Cseke, and Ryota Tomioka. f-GAN: Training generative neural samplers using variational divergence minimization. In Proc. of NIPS, 2016.
221
+ Sindy Löwe, Peter O'Connor, and Bastiaan S. Veeling. Greedy InfoMax for biologically plausible self-supervised representation learning. In Proc. of NeurIPS, 2019.
222
+ Liam Paninski. Estimation of entropy and mutual information. Neural computation, 15(6):1191-1253, 2003.
223
+ Jeffrey Pennington, Richard Socher, and Christopher D. Manning. Glove: Global vectors for word representation. In Proc. of EMNLP, 2014.
224
+ Matthew E. Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. In Proc. of NAACL, 2018.
225
+ Ben Poole, Sherjil Ozair, Aaron van den Oord, Alexander A. Alemi, and George Tucker. On variational lower bounds of mutual information. In Proc. of ICML, 2019.
226
+ Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. Technical report, OpenAI, 2018.
227
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. Technical report, OpenAI, 2019.
228
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. SQuAD: 100,000+ questions for machine comprehension of text. In Proc. of EMNLP, 2016.
229
+ Pranav Rajpurkar, Robin Jia, and Percy Liang. Know what you don't know: Unanswerable questions for squad. In Proc. of ACL, 2018.
230
+ Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, and Tie-Yan Liu. MASS: Masked sequence to sequence pre-training for language generation. In Proc. of ICML, 2019.
231
+ Michael Tschannen, Josip Djolonga, Paul K. Rubenstein, and Sylvain Gelly. On mutual information maximization for representation learning. arXiv preprint 1907.13625, 2019.
232
+ Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint 1807.03748, 2019.
233
+
234
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proc. of ICLR, 2019.
235
+ Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le. XLNet: Generalized autoregressive pretraining for language understanding. arXiv preprint 1906.08237, 2019.
236
+ Dani Yogatama, Cyprien de Masson d'Autume, Jerome Connor, Tomas Kocisky, Mike Chrzanowski, Lingpeng Kong, Angeliki Lazaridou, Wang Ling, Lei Yu, Chris Dyer, and Phil Blunsom. Learning and evaluating general linguistic intelligence. arXiv preprint 1901.11373, 2019.
237
+
238
+ # A NEXT SENTENCE PREDICTION
239
+
240
+ In this section, we show that the next sentence prediction objective used in BERT is an instance of contrastive learning. In next sentence prediction, given two sentences $\pmb{x}^1$ and $\pmb{x}^2$ , the task is to predict whether these are two consecutive sentences or not. Training data for this task is created by sampling a random second sentence $\tilde{\pmb{x}}^2$ from the corpus to be used as a negative example 50% of the time.
241
+
242
+ Consider a discriminator (i.e., a classifier with parameters $\phi$ ) that takes encoded representations of concatenated $x^1$ and $x^2$ and returns a score. We denote this discriminator by $d_{\phi}(x^1, x^2)$ . The next sentence prediction objective function is:
243
+
244
+ $$
245
+ \mathbb {E} _ {p \left(\boldsymbol {x} ^ {1}, \boldsymbol {x} ^ {2}\right)} \left[ \log d _ {\phi} \left(g _ {\omega} \left(\left[ \boldsymbol {x} ^ {1}, \boldsymbol {x} ^ {2} \right]\right)\right) + \log \left(1 - d _ {\phi} \left(g _ {\omega} \left(\left[ \boldsymbol {x} ^ {1}, \tilde {\boldsymbol {x}} ^ {2} \right]\right)\right)\right) \right].
246
+ $$
247
+
248
+ This objective function—which is used for training BERT—is known in the literature as "local" Noise Contrastive Estimation (Gutmann & Hyvarinen, 2012). Since summing over all possible negative sentences is intractable, BERT approximates this by using a binary classifier to distinguish real samples and noisy samples.
249
+
250
+ An alternative approximation to using a binary classifier is to use "global NCE", which is what InfoNCE is based on. Here, we have:
251
+
252
+ $$
253
+ \mathbb {E} _ {p \left(\boldsymbol {x} ^ {1}, \boldsymbol {x} ^ {2}\right)} \left[ \psi^ {\top} g _ {\boldsymbol {\omega}} \left(\left[ \boldsymbol {x} ^ {1}, \boldsymbol {x} ^ {2} \right]\right) - \log \sum_ {\tilde {\boldsymbol {x}} ^ {2} \in \tilde {\mathcal {X}} ^ {2}} \exp \left(\psi^ {\top} g _ {\boldsymbol {\omega}} \left(\left[ \boldsymbol {x} ^ {1}, \tilde {\boldsymbol {x}} ^ {2} \right]\right)\right) \right],
254
+ $$
255
+
256
+ where we sample negative sentences from the corpus and combine them with the positive sentence to construct $\tilde{\mathcal{X}}^2$ . To make the connection of this objective function with InfoNCE in Eq. 1 explicit, let $a$ and $b$ be two consecutive sentences $\pmb{x}^1$ and $\pmb{x}^2$ . Let $f_{\pmb{\theta}}(a,b)$ be $\psi^\top g_{\omega}([a,b])$ , where $\psi \in \mathbb{R}^d$ is a trainable parameter and $[a,b]$ denotes the concatenation of $a$ and $b$ . Consider a Transformer encoder parameterized by $\omega$ , and let $g_{\omega}([a,b]) \in \mathbb{R}^d$ be a function that returns the final hidden state of the first token after running the concatenated sequence through the Transformer. Note that the encoder that we want to learn only depends on $g_{\omega}$ , so both of these approximations can be used for training next sentence prediction.
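To make the construction concrete, here is a minimal sketch (not the authors' released code) of the "global NCE" variant of next sentence prediction. It assumes a hypothetical tensor `first_token_states` holding $g_{\omega}$ applied to the positive pair and to the sampled negative pairs, and a trainable vector `psi` playing the role of $\psi$:

```python
import torch

def global_nce_nsp_loss(first_token_states, psi):
    """Global-NCE next-sentence objective (sketch).

    first_token_states: (N, d) tensor; row 0 is g_w([x^1, x^2]) for the true pair,
        rows 1..N-1 are g_w([x^1, x~^2]) for sampled negative second sentences.
    psi: trainable (d,) vector.
    """
    scores = first_token_states @ psi                      # psi^T g_w([., .]) for each pair
    # Positive score minus log-sum-exp over the candidate set (positive + negatives).
    return -(scores[0] - torch.logsumexp(scores, dim=0))
```

Maximizing the quantity inside the expectation then amounts to minimizing this loss averaged over sampled sentence pairs.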
257
+
258
+ # B HYPERPARAMETERS
259
+
260
+ Pretraining. We use Adam (Kingma & Ba, 2015) with $\beta_{1} = 0.9$ , $\beta_{2} = 0.98$ and $\epsilon = 1e^{-6}$ . The batch size for training is 1024 with a maximum sequence length of 512. We train for 400,000 steps (including 18,000 warmup steps) with a weight decay rate of 0.01. We set the learning rate to $4e^{-4}$ for all variants of the BASE models and $1e^{-4}$ for the LARGE models. We set $\lambda_{\mathrm{MLM}}$ to 1.0 and tune $\lambda_{\mathrm{DIM}} \in \{0.4, 0.6, 0.8, 1.0\}$ .
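As an illustration only, the optimizer settings and loss weighting above could be wired up as follows; `model`, `loss_mlm`, and `loss_dim` are hypothetical placeholders, and the warmup schedule and weight decay are omitted:

```python
import torch

def make_optimizer(model, lr=4e-4):
    # Adam settings from the text; warmup and weight decay are handled elsewhere.
    return torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-6)

def pretraining_step(optimizer, loss_mlm, loss_dim, lambda_mlm=1.0, lambda_dim=0.8):
    # lambda_MLM is fixed to 1.0; lambda_DIM is tuned in {0.4, 0.6, 0.8, 1.0}.
    loss = lambda_mlm * loss_mlm + lambda_dim * loss_dim
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
```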
261
+
262
+ GLUE. We set the maximum sequence length to 128. For each GLUE task, we use the respective development set to choose the learning rate from $\{5e^{-6}, 1e^{-5}, 2e^{-5}, 3e^{-5}, 5e^{-5}\}$ , and the batch size from $\{16, 32\}$ . The number of training epochs is set to 4 for CoLA and 10 for other tasks, following Joshi et al. (2019). We run each hyperparameter configuration 5 times and evaluate the best model on the test set (once).
263
+
264
+ SQuAD. We set the maximum sequence length to 512 and train for 4 epochs. We use the development set to choose the learning rate from $\{5e^{-6}, 1e^{-5}, 2e^{-5}, 3e^{-5}, 5e^{-5}\}$ and the batch size from $\{16, 32\}$ .
265
+
266
+ # C QUESTION ANSWERING DECODER
267
+
268
+ We use a standard span predictor as follows. Denote the length of the context paragraph by $M$ , and $\mathbf{x}^{\mathrm{context}} = \{x_1^{\mathrm{context}}, \ldots, x_M^{\mathrm{context}}\}$ . Denote the encoded representation of the $m$ -th token in the context by $\mathbf{x}_{t,m}^{\mathrm{context}}$ . The question answering decoder introduces two parameter vectors: $\mathbf{w}_{\mathrm{start}}$ and $\mathbf{w}_{\mathrm{end}}$ . The probability of each context token being the start of the answer is computed as: $p(\mathrm{start} = x_{t,m}^{\mathrm{context}} \mid \mathbf{x}_t) = \frac{\exp(\mathbf{w}_{\mathrm{start}}^\top \mathbf{x}_{t,m}^{\mathrm{context}})}{\sum_{n=1}^{M} \exp(\mathbf{w}_{\mathrm{start}}^\top \mathbf{x}_{t,n}^{\mathrm{context}})}$ . The probability of the end index of the answer is computed analogously using $\mathbf{w}_{\mathrm{end}}$ . The predicted answer is the span with the highest probability after multiplying the start and end probabilities.
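A minimal sketch of this span predictor, assuming `context_states` holds the encoded context tokens (shape M x d) and `w_start`/`w_end` are the two parameter vectors:

```python
import torch

def span_distributions(context_states, w_start, w_end):
    """Start/end probabilities over the M context tokens (softmax over the context)."""
    p_start = torch.softmax(context_states @ w_start, dim=0)
    p_end = torch.softmax(context_states @ w_end, dim=0)
    return p_start, p_end

def best_span(p_start, p_end):
    """Return (start, end) with start <= end maximizing p_start[start] * p_end[end]."""
    scores = torch.triu(p_start.unsqueeze(1) * p_end.unsqueeze(0))  # zero out end < start
    idx = torch.argmax(scores).item()
    return divmod(idx, scores.size(1))
```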
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e90d14dc5cc02072580f09d20c492878f901a0de02ef49d3b3456d237631298
3
+ size 198518
amutualinformationmaximizationperspectiveoflanguagerepresentationlearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b49fa58650fc8d2dcadcb249bfdff642355c2638d0e3714cdd0de7b4a01d8114
3
+ size 476421
andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7237c76b8fa353dcd2619cfc92fbb919777284c0963918e99f2ef9ceaf2d1203
3
+ size 64109
andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df54c1a9890ac7519ed15c944bf95178fd8b187e0b748f2d8a46b8c36c1cc509
3
+ size 79751
andthebitgoesdownrevisitingthequantizationofneuralnetworks/2ff3df52-e78c-49b4-868c-2b4d8290c64b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c136b05b1431a7bc1521705493680fe1a8f87c9a795739b9d9002202ba40922a
3
+ size 1535403
andthebitgoesdownrevisitingthequantizationofneuralnetworks/full.md ADDED
@@ -0,0 +1,232 @@
1
+ # AND THE BIT GOES DOWN: REVISITING THE QUANTIZATION OF NEURAL NETWORKS
2
+
3
+ Pierre Stock $^{1,2}$ , Armand Joulin $^{1}$ , Rémi Gribonval $^{2}$ , Benjamin Graham $^{1}$ , Hervé Jégou $^{1}$
4
+
5
+ <sup>1</sup>Facebook AI Research, <sup>2</sup>Univ Rennes, Inria, CNRS, IRISA
6
+
7
+ # ABSTRACT
8
+
9
+ In this paper, we address the problem of reducing the memory footprint of convolutional network architectures. We introduce a vector quantization method that aims at preserving the quality of the reconstruction of the network outputs rather than its weights. The principle of our approach is that it minimizes the reconstruction error of the network outputs for in-domain inputs. Our method only requires a set of unlabelled data at quantization time and allows for efficient inference on CPU by using byte-aligned codebooks to store the compressed weights. We validate our approach by quantizing a high performing ResNet-50 model to a memory size of 5 MB ( $20 \times$ compression factor) while preserving a top-1 accuracy of $76.1\%$ on ImageNet object classification and by compressing a Mask R-CNN with a $26 \times$ factor. $^{1}$
10
+
11
+ # 1 INTRODUCTION
12
+
13
+ There is a growing need for compressing the best convolutional networks (or ConvNets) to support embedded devices for applications like robotics and virtual/augmented reality. Indeed, the performance of ConvNets on image classification has steadily improved since the introduction of AlexNet (Krizhevsky et al., 2012). This progress has been fueled by deeper and richer architectures such as the ResNets (He et al., 2015) and their variants ResNeXts (Xie et al., 2017) or DenseNets (Huang et al., 2017). Those models particularly benefit from the recent progress made with weak supervision (Mahajan et al., 2018; Yalniz et al., 2019; Berthelot et al., 2019). Compression of ConvNets has been an active research topic in recent years, leading to networks with a $71\%$ top-1 accuracy on ImageNet object classification that fit in 1 MB (Wang et al., 2018b).
14
+
15
+ In this work, we propose a compression method particularly adapted to ResNet-like architectures. Our approach takes advantage of the high correlation in the convolutions by the use of a structured quantization algorithm, Product Quantization (PQ) (Jégou et al., 2011). More precisely, we exploit the spatial redundancy of information inherent to standard convolution filters (Denton et al., 2014). Besides reducing the memory footprint, we also produce compressed networks allowing efficient inference on CPU by using byte-aligned indexes, as opposed to entropy decoders (Han et al., 2016).
16
+
17
+ Our approach departs from traditional scalar quantizers (Han et al., 2016) and vector quantizers (Gong et al., 2014; Carreira-Perpiñán & Idelbayev, 2017) by focusing on the accuracy of the activations rather than the weights. This is achieved by leveraging a weighted $k$ -means technique. To our knowledge this strategy (see Section 3) is novel in this context. The closest work we are aware of is the one by Choi et al. (2016), but the authors use a different objective (their weighted term is derived from second-order information) along with a different quantization technique (scalar quantization). Our method targets a better in-domain reconstruction, as depicted by Figure 1.
18
+
19
+ Finally, we compress the network sequentially to account for the dependency of our method to the activations at each layer. To prevent the accumulation of errors across layers, we guide this compression with the activations of the uncompressed network on unlabelled data: training by distillation (Hinton et al., 2014) allows for both an efficient layer-by-layer compression procedure and a global fine-tuning of the codewords. Thus, we only need a set of unlabelled images to adjust the codewords. As opposed to recent works by Mishra & Marr (2017) or Lopes et al. (2017), our distillation scheme is sequential and the underlying compression method is different (PQ vs. scalar).
20
+
21
+ ![](images/2fb21d71c12f31b09eae125143f0c8720d067c5fbaf19dde5ed4626c79ed83f8.jpg)
22
+ Figure 1: Illustration of our method. We approximate a binary classifier $\varphi$ that labels images as dogs or cats by quantizing its weights. Standard method: quantizing $\varphi$ with the standard objective function (1) promotes a classifier $\widehat{\varphi}_{\mathrm{standard}}$ that tries to approximate $\varphi$ over the entire input space and can thus perform badly for in-domain inputs. Our method: quantizing $\varphi$ with our objective function (2) promotes a classifier $\widehat{\varphi}_{\mathrm{activations}}$ that performs well for in-domain inputs. Images lying in the hatched area of the input space are correctly classified by $\widehat{\varphi}_{\mathrm{activations}}$ but incorrectly by $\widehat{\varphi}_{\mathrm{standard}}$ .
23
+
24
+ We show that applying our approach to the semi-supervised ResNet-50 of Yalniz et al. (Yalniz et al., 2019) leads to a 5 MB memory footprint and a $76.1\%$ top-1 accuracy on ImageNet object classification (hence $20\times$ compression vs. the original model). Moreover, our approach generalizes to other tasks such as image detection. As shown in Section 4.3, we compress a Mask R-CNN (He et al., 2017) with a size budget around 6 MB ( $26\times$ compression factor) while maintaining a competitive performance.
25
+
26
+ # 2 RELATED WORK
27
+
28
+ There is a large body of literature on network compression. We review the works closest to ours and refer the reader to two recent surveys (Guo, 2018; Cheng et al., 2017) for a comprehensive overview.
29
+
30
+ Low-precision training. Since early works like those of Courbariaux et al. (2015), researchers have developed various approaches to train networks with low precision weights. Those approaches include training with binary or ternary weights (Shayer et al., 2017; Zhu et al., 2016; Li & Liu, 2016; Rastegari et al., 2016; McDonnell, 2018), learning a combination of binary bases (Lin et al., 2017) and quantizing the activations (Zhou et al., 2016; 2017; Mishra et al., 2017). Some of these methods assume the availability of specialized hardware that speeds up inference and improves power efficiency by replacing most arithmetic operations with bit-wise operations. However, the back-propagation has to be adapted to the case where the weights are discrete.
31
+
32
+ Quantization. Vector Quantization (VQ) and Product Quantization (PQ) have been extensively studied in the context of nearest-neighbor search (Jegou et al., 2011; Ge et al., 2014; Norouzi & Fleet, 2013). The idea is to decompose the original high-dimensional space into a Cartesian product of subspaces that are quantized separately with a joint codebook. To our knowledge, Gong et al. (2014) were the first to introduce these stronger quantizers for neural network quantization, followed by Carreira-Perpiñán & Idelbayev (2017). As we will see in the remainder of this paper, employing this discretization off-the-shelf does not optimize the right objective function, and leads to a catastrophic drift of performance for deep networks.
33
+
34
+ Pruning. Network pruning amounts to removing connections according to an importance criterion (typically the magnitude of the weight associated with this connection) until the desired model size/accuracy tradeoff is reached (LeCun et al., 1990). A natural extension of this work is to prune structural components of the network, for instance by enforcing channel-level (Liu et al., 2017) or filter-level (Luo et al., 2017) sparsity. However, these methods alternate between pruning and re-training steps and thus typically require a long training time.
35
+
36
+ Dedicated architectures. Architectures such as SqueezeNet (Iandola et al., 2016), NASNet (Zoph et al., 2017), ShuffleNet (Zhang et al., 2017; Ma et al., 2018), MobileNets (Sandler et al., 2018) and EfficientNets (Tan & Le, 2019) are designed to be memory efficient. As they typically rely on a combination of depth-wise and point-wise convolutional filters, sometimes along with channel shuffling, they are less prone than ResNets to structured quantization techniques such as PQ. These architectures are either designed by hand or using the framework of architecture search (Howard et al., 2019). For instance, a MobileNet has a model size of $13.4\mathrm{MB}$ for a top-1 ImageNet accuracy of $71.9\%$ , to be compared with a vanilla ResNet-50 of size $97.5\mathrm{MB}$ for a top-1 of $76.2\%$ . Moreover, larger models such as ResNets can benefit from large-scale weakly- or semi-supervised learning to reach better performance (Mahajan et al., 2018; Yalniz et al., 2019).
37
+
38
+ Combining some of the mentioned approaches yields high compression factors as demonstrated by Han et al. with Deep Compression (DC) (Han et al., 2016) or more recently by Tung & Mori (Tung & Mori, 2018). Moreover, from a practical point of view, the process of compressing networks depends on the type of hardware on which the networks will run. Recent work directly quantizes to optimize energy efficiency and latency on specific hardware (Wang et al., 2018a). Finally, the memory overhead of storing the full activations is negligible compared to the storage of the weights for two reasons. First, in realistic real-time inference setups, the batch size is almost always equal to one. Second, a forward pass only requires storing the activations of the current layer (which are often smaller than the input) and not the activations of the whole network.
39
+
40
+ # 3 OUR APPROACH
41
+
42
+ In this section, we describe our strategy for network compression and we show how to extend our approach to quantize a modern ConvNet architecture. The specificity of our approach is that it aims at a small reconstruction error for the outputs of the layer rather than for the layer weights themselves. We first describe how we quantize a single fully-connected layer and a convolutional layer. Then we describe how we quantize a full pre-trained network and finetune it.
43
+
44
+ # 3.1 QUANTIZATION OF A FULLY-CONNECTED LAYER
45
+
46
+ We consider a fully-connected layer with weights $\mathbf{W} \in \mathbf{R}^{C_{\mathrm{in}} \times C_{\mathrm{out}}}$ and, without loss of generality, we omit the bias since it does not impact reconstruction error.
47
+
48
+ Product Quantization (PQ). Applying the PQ algorithm to the columns of $\mathbf{W}$ consists in evenly splitting each column into $m$ contiguous subvectors and learning a codebook on the resulting $mC_{\mathrm{out}}$ subvectors. Then, a column of $\mathbf{W}$ is quantized by mapping each of its subvector to its nearest codeword in the codebook. For simplicity, we assume that $C_{\mathrm{in}}$ is a multiple of $m$ , i.e., that all the subvectors have the same dimension $d = C_{\mathrm{in}} / m$ .
49
+
50
+ More formally, the codebook $\mathcal{C} = \{\mathbf{c}_1,\dots ,\mathbf{c}_k\}$ contains $k$ codewords of dimension $d$ . Any column $\mathbf{w}_j$ of $\mathbf{W}$ is mapped to its quantized version $\mathbf{q}(\mathbf{w}_j) = (\mathbf{c}_{i_1},\ldots ,\mathbf{c}_{i_m})$ where $i_{1}$ denotes the index of the codeword assigned to the first subvector of $\mathbf{w}_j$ , and so forth. The codebook is then learned by minimizing the following objective function:
51
+
52
+ $$
53
+ \left\| \mathbf {W} - \widehat {\mathbf {W}} \right\| _ {2} ^ {2} = \sum_ {j} \left\| \mathbf {w} _ {j} - \mathbf {q} \left(\mathbf {w} _ {j}\right) \right\| _ {2} ^ {2}, \tag {1}
54
+ $$
55
+
56
+ where $\widehat{\mathbf{W}}$ denotes the quantized weights. This objective can be efficiently minimized with $k$ -means. When $m$ is set to 1, PQ is equivalent to vector quantization (VQ) and when $m$ is equal to $C_{\mathrm{in}}$ , it is the scalar $k$ -means algorithm. The main benefit of PQ is its expressivity: each column $\mathbf{w}_j$ is mapped to a vector in the $m$ -fold product $\mathcal{C}\times \dots \times \mathcal{C}$ , thus PQ generates an implicit codebook of size $k^m$ .
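For reference, a minimal sketch of this standard PQ baseline (Equation (1)), using scikit-learn's k-means on the column subvectors; the function and variable names are ours, not the released implementation:

```python
import numpy as np
from sklearn.cluster import KMeans

def pq_quantize(W, m, k):
    """Quantize W (C_in x C_out) column-wise with m subvectors of size d = C_in / m."""
    c_in, c_out = W.shape
    d = c_in // m
    subvectors = W.T.reshape(c_out * m, d)          # one row per subvector
    km = KMeans(n_clusters=k, n_init=10).fit(subvectors)
    codebook = km.cluster_centers_                  # (k, d)
    assignments = km.labels_.reshape(c_out, m)      # one index per subvector
    W_hat = codebook[assignments].reshape(c_out, c_in).T
    return codebook, assignments, W_hat
```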
57
+
58
+ Our algorithm. PQ quantizes the weight matrix of the fully-connected layer. However, in practice, we are interested in preserving the output of the layer, not its weights. This is illustrated in the case of a non-linear classifier in Figure 1: preserving the weights of a layer does not necessarily guarantee preserving its output. In other words, the Frobenius approximation of the weights of a layer is not guaranteed to be the best approximation of the output over some arbitrary domain (in particular for in-domain inputs). We thus propose an alternative to PQ that directly minimizes the reconstruction error on the output activations obtained by applying the layer to in-domain inputs. More precisely, given a batch of $B$ input activations $\mathbf{x} \in \mathbf{R}^{B \times C_{\mathrm{in}}}$ , we are interested in learning a codebook $\mathcal{C}$
59
+
60
+ that minimizes the difference between the output activations and their reconstructions:
61
+
62
+ $$
63
+ \left\| \mathbf {y} - \widehat {\mathbf {y}} \right\| _ {2} ^ {2} = \sum_ {j} \left\| \mathbf {x} \left(\mathbf {w} _ {j} - \mathbf {q} \left(\mathbf {w} _ {j}\right)\right) \right\| _ {2} ^ {2}, \tag {2}
64
+ $$
65
+
66
+ where $\mathbf{y} = \mathbf{x}\mathbf{W}$ is the output and $\widehat{\mathbf{y}} = \mathbf{x}\widehat{\mathbf{W}}$ its reconstruction. Our objective is a re-weighting of the objective in Equation (1). We can thus learn our codebook with a weighted $k$ -means algorithm. First, we unroll $\mathbf{x}$ of size $B\times C_{\mathrm{in}}$ into $\widetilde{\mathbf{x}}$ of size $(B\times m)\times d$ i.e. we split each row of $\mathbf{x}$ into $m$ subvectors of size $d$ and stack these subvectors. Next, we adapt the EM algorithm as follows.
67
+
68
+ (1) E-step (cluster assignment). Recall that every column $\mathbf{w}_j$ is divided into $m$ subvectors of dimension $d$ . Each subvector $\mathbf{v}$ is assigned to the codeword $\mathbf{c}_j$ such that
69
+
70
+ $$
71
+ \mathbf {c} _ {j} = \underset {\mathbf {c} \in \mathcal {C}} {\operatorname {a r g m i n}} \| \widetilde {\mathbf {x}} (\mathbf {c} - \mathbf {v}) \| _ {2} ^ {2}. \tag {3}
72
+ $$
73
+
74
+ This step is performed by exhaustive exploration. Our implementation relies on broadcasting to be computationally efficient.
75
+
76
+ (2) M-step (codeword update). Let us consider a codeword $\mathbf{c} \in \mathcal{C}$ . We denote $(\mathbf{v}_p)_{p \in I_{\mathbf{c}}}$ the subvectors that are currently assigned to $\mathbf{c}$ . Then, we update $\mathbf{c} \gets \mathbf{c}^{\star}$ , where
77
+
78
+ $$
79
+ \mathbf {c} ^ {\star} = \underset {\mathbf {c} \in \mathbf {R} ^ {d}} {\operatorname {a r g m i n}} \sum_ {p \in I _ {\mathbf {c}}} \| \widetilde {\mathbf {x}} (\mathbf {c} - \mathbf {v} _ {p}) \| _ {2} ^ {2}. \tag {4}
80
+ $$
81
+
82
+ This step explicitly computes the solution of the least-squares problem<sup>2</sup>. Our implementation performs the computation of the pseudo-inverse of $\widetilde{\mathbf{x}}$ before alternating between the Expectation and Minimization steps as it does not depend on the learned codebook $\mathcal{C}$ .
83
+
84
+ We initialize the codebook $\mathcal{C}$ by uniformly sampling $k$ vectors among those we wish to quantize. After performing the E-step, some clusters may be empty. To resolve this issue, we iteratively perform the following additional steps for each empty cluster of index $i$ . (1) Find the codeword $\mathbf{c}_0$ corresponding to the most populated cluster; (2) define new codewords $\mathbf{c}_0' = \mathbf{c}_0 + \mathbf{e}$ and $\mathbf{c}_i' = \mathbf{c}_0 - \mathbf{e}$ , where $\mathbf{e} \sim \mathcal{N}(\mathbf{0}, \varepsilon \mathbf{I})$ and (3) perform the E-step again. We proceed to the M-step after all the empty clusters are resolved. We set $\varepsilon = 10^{-8}$ and we observe that it generally takes no more than 1 or 2 E-M iterations to resolve all the empty clusters. Note that the quality of the resulting compression is sensitive to the choice of $\mathbf{x}$ .
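Putting the two steps together, here is a minimal NumPy sketch of the activation-weighted k-means of Equations (2)-(4). It assumes the unrolled activations $\widetilde{\mathbf{x}}$ and the weight subvectors are already extracted, and it omits the empty-cluster splitting described above:

```python
import numpy as np

def weighted_kmeans(x_tilde, subvectors, k, n_iter=100, seed=0):
    """Activation-weighted k-means sketch for Equation (2).

    x_tilde:    (B*m, d) unrolled input activations.
    subvectors: (n, d) subvectors of the weight columns to quantize.
    """
    rng = np.random.default_rng(seed)
    codebook = subvectors[rng.choice(len(subvectors), size=k, replace=False)]
    G = x_tilde.T @ x_tilde                  # metric induced by the activations
    x_pinv = np.linalg.pinv(x_tilde)         # precomputed once, as described in the text

    for _ in range(n_iter):
        # E-step: assign each subvector v to argmin_c (c - v)^T G (c - v)  (Eq. 3).
        cGc = np.einsum('kd,de,ke->k', codebook, G, codebook)
        vGc = subvectors @ G @ codebook.T
        assign = (cGc[None, :] - 2.0 * vGc).argmin(axis=1)   # v^T G v is constant per row

        # M-step: least-squares update of every non-empty codeword  (Eq. 4).
        for j in range(k):
            members = subvectors[assign == j]
            if len(members):                                  # empty-cluster splitting omitted
                codebook[j] = x_pinv @ (x_tilde @ members.mean(axis=0))
    return codebook, assign
```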
85
+
86
+ # 3.2 CONVOLUTIONAL LAYERS
87
+
88
+ Despite being presented in the case of a fully-connected layer, our approach works on any set of vectors. As a consequence, our approach can be applied to a convolutional layer if we split the associated 4D weight matrix into a set of vectors. There are many ways to split a 4D matrix into a set of vectors, and we are aiming for one that maximizes the correlation between the vectors, since vector quantization based methods work best when the vectors are highly correlated.
89
+
90
+ Given a convolutional layer, we have $C_{\mathrm{out}}$ filters of size $K\times K\times C_{\mathrm{in}}$ , leading to an overall 4D weight matrix $\mathbf{W}\in \mathbf{R}^{C_{\mathrm{out}}\times C_{\mathrm{in}}\times K\times K}$ . The dimensions along the output and input coordinates have no particular reason to be correlated. On the other hand, the spatial dimensions related to the filter size are by nature very correlated: nearby patches or pixels likely share information. As depicted in Figure 2, we thus reshape the weight matrix in a way that leads to a spatially coherent quantization. More precisely, we quantize $\mathbf{W}$ spatially into subvectors of size $d = K\times K$ using the following procedure. We first reshape $\mathbf{W}$ into a 2D matrix of size $(C_{\mathrm{in}}\times K\times K)\times C_{\mathrm{out}}$ . Column $j$ of the reshaped matrix $\mathbf{W}_{\mathrm{r}}$ corresponds to the $j^{\mathrm{th}}$ filter of $\mathbf{W}$ and is divided into $C_{\mathrm{in}}$ subvectors of size $K\times K$ . Similarly, we reshape the input activations $\mathbf{x}$ accordingly into $\mathbf{x}_{\mathrm{r}}$ so that reshaping back the matrix $\mathbf{x}_{\mathrm{r}}\mathbf{W}_{\mathrm{r}}$ yields the same result as $\mathbf{x}*\mathbf{W}$ . In other words, we adopt a dual approach to the one using bi-level Toeplitz matrices to represent the weights. Then, we apply our method exposed in Section 3.1 to quantize each column of $\mathbf{W}_{\mathrm{r}}$ into $m = C_{\mathrm{in}}$ subvectors of size $d = K\times K$ with $k$ codewords, using $\mathbf{x}_{\mathrm{r}}$ as input activations in (2). As a natural extension, we also quantize with larger subvectors, for example subvectors of size $d = 2\times K\times K$ ; see Section 4 for details.
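A minimal PyTorch sketch of this spatial reshaping for the default case $d = K \times K$ ; the corresponding reshaping of the activations and the handling of stride, padding and groups are omitted:

```python
import torch

def reshape_conv_weight(weight):
    """(C_out, C_in, K, K) -> (C_in*K*K, C_out): column j is the j-th filter, flattened."""
    c_out, c_in, k, _ = weight.shape
    return weight.reshape(c_out, c_in * k * k).t()

def split_columns_into_subvectors(w_r, d):
    """Cut every column into contiguous subvectors of size d (here d = K*K),
    i.e. one spatial patch per input channel."""
    c_in_kk, c_out = w_r.shape
    return w_r.t().reshape(c_out * (c_in_kk // d), d)
```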
91
+
92
+ ![](images/eaf4edb4795996ba60ba907c24a18819f722eda59eb054566c42640a1f0d84d7.jpg)
93
+ Figure 2: We quantize $C_{\mathrm{out}}$ filters of size $C_{\mathrm{in}} \times K \times K$ using a subvector size of $d = K \times K$ . In other words, we spatially quantize the convolutional filters to take advantage of the redundancy of information in the network. Similar colors denote subvectors assigned to the same codewords.
94
+
95
+ ![](images/773452dc0c965b129cbab8a4700d09d7ca2975c121974d07df03a4c6c15cbe3c.jpg)
96
+
97
+ ![](images/3209edcff003e01b0014615b9bcafd60f5d42ecda4eef060639c6be29e1ab80f.jpg)
98
+
99
+ In our implementation, we adapt the reshaping of $\mathbf{W}$ and $\mathbf{x}$ to various types of convolutions. We account for the padding, the stride, the number of groups (for depthwise convolutions and in particular for pointwise convolutions) and the kernel size. We refer the reader to the code for more details.
100
+
101
+ # 3.3 NETWORK QUANTIZATION
102
+
103
+ In this section, we describe our approach for quantizing a neural network. We quantize the network sequentially starting from the lowest layer to the highest layer. We guide the compression of the student network by the non-compressed teacher network, as detailed below.
104
+
105
+ Learning the codebook. We recover the current input activations of the layer, i.e. the input activations obtained by forwarding a batch of images through the quantized lower layers, and we quantize the current layer using those activations. Experimentally, we observed a drift in both the reconstruction and classification errors when using the activations of the non-compressed network rather than the current activations.
106
+
107
+ Finetuning the codebook. We finetune the codewords by distillation (Hinton et al., 2014) using the non-compressed network as the teacher network and the compressed network (up to the current layer) as the student network. Denoting $y_{\mathrm{t}}$ (resp. $y_{\mathrm{s}}$ ) the output probabilities of the teacher (resp. student) network, the loss we optimize is the Kullback-Leibler divergence $\mathcal{L} = \mathrm{KL}(\mathbf{y}_{\mathrm{s}},\mathbf{y}_{\mathrm{t}})$ . Finetuning on codewords is done by averaging the gradients of each subvector assigned to a given codeword. More formally, after the quantization step, we fix the assignments once and for all. Then, denoting $(\mathbf{b}_p)_{p\in I_{\mathbf{c}}}$ the subvectors that are assigned to codeword $\mathbf{c}$ , we perform the SGD update with a learning rate $\eta$
108
+
109
+ $$
110
+ \mathbf {c} \leftarrow \mathbf {c} - \eta \frac {1}{\left| I _ {\mathbf {c}} \right|} \sum_ {p \in I _ {\mathbf {c}}} \frac {\partial \mathcal {L}}{\partial \mathbf {b} _ {p}}. \tag {5}
111
+ $$
112
+
113
+ Experimentally, we find this approach to perform better than finetuning on the image labels, as demonstrated in Table 3. Moreover, this approach does not require any labelled data.
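A minimal sketch of one such finetuning step, assuming the student layer builds its weights as `codebook[assignments]` so that autograd sums the gradients of all subvectors assigned to a codeword; dividing by the cluster sizes yields the average of Equation (5). This is an illustration under those assumptions, not the released implementation:

```python
import torch
import torch.nn.functional as F

def distill_codebook_step(student_logits, teacher_logits, codebook, assignments, lr):
    """One SGD step on the codewords with the KL distillation loss (sketch)."""
    loss = F.kl_div(F.log_softmax(student_logits, dim=-1),
                    F.softmax(teacher_logits, dim=-1),
                    reduction='batchmean')
    grad, = torch.autograd.grad(loss, codebook)           # summed over assigned subvectors
    counts = torch.bincount(assignments.flatten(),
                            minlength=codebook.size(0)).clamp(min=1)
    codebook.data -= lr * grad / counts.unsqueeze(1).to(grad.dtype)
    return loss.item()
```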
114
+
115
+ # 3.4 GLOBAL FINETUNING
116
+
117
+ In a final step, we globally finetune the codebooks of all the layers to reduce any residual drifts and we update the running statistics of the BatchNorm layers: We empirically find it beneficial to finetune all the centroids after the whole network is quantized. The finetuning procedure is exactly the same as described in Section 3.3, except that we additionally switch the BatchNorms to the training mode, meaning that the learnt coefficients are still fixed but that the batch statistics (running mean and variance) are still being updated with the standard moving average procedure.
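The BatchNorm handling described above can be sketched as follows, keeping the learnt affine coefficients out of the optimization while letting the running statistics follow the batches; this is an illustration, not the released code:

```python
import torch

def refresh_batchnorm_stats(model):
    for module in model.modules():
        if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
            module.train()                 # running mean/var updated by the moving average
            if module.affine:              # learnt scale/shift stay fixed
                module.weight.requires_grad_(False)
                module.bias.requires_grad_(False)
```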
118
+
119
+ We perform the global finetuning using the standard ImageNet training set for 9 epochs with an initial learning rate of 0.01, a weight decay of $10^{-4}$ and a momentum of 0.9. The learning rate is decayed by a factor 10 every 3 epochs. As demonstrated in the ablation study in Table 3, finetuning on the true labels performs worse than finetuning by distillation. A possible explanation is that the supervision signal coming from the teacher network is richer than the one-hot vector used as a traditional learning signal in supervised learning (Hinton et al., 2014).
120
+
121
+ # 4 EXPERIMENTS
122
+
123
+ # 4.1 EXPERIMENTAL SETUP
124
+
125
+ We quantize vanilla ResNet-18 and ResNet-50 architectures pretrained on the ImageNet dataset (Deng et al., 2009). Unless explicitly mentioned otherwise, the pretrained models are taken from the PyTorch model zoo<sup>3</sup>. We run our method on a 16 GB Volta V100 GPU. Quantizing a ResNet-50 with our method (including all finetuning steps) takes about one day on 1 GPU. We detail our experimental setup below. Our code and the compressed models are open-sourced.
126
+
127
+ Compression regimes. We explore a large block sizes (resp. small block sizes) compression regime by setting the subvector size of regular $3 \times 3$ convolutions to $d = 18$ (resp. $d = 9$ ) and the subvector size of pointwise convolutions to $d = 8$ (resp. $d = 4$ ). For ResNet-18, the block size of pointwise convolutions is always equal to 4. The number of codewords or centroids is set to $k \in \{256, 512, 1024, 2048\}$ for each compression regime. Note that we clamp the number of centroids to $\min(k, C_{\mathrm{out}} \times m/4)$ for stability. For instance, the first layer of the first stage of the ResNet-50 has size $64 \times 64 \times 1 \times 1$ , thus we always use $k = 128$ centroids with a block size $d = 8$ . For a given number of centroids $k$ , small blocks lead to a lower compression ratio than large blocks.
128
+
129
+ Sampling the input activations. Before quantizing each layer, we randomly sample a batch of 1024 training images to obtain the input activations of the current layer and reshape it as described in Section 3.2. Then, before each iteration (E+M step) of our method, we randomly sample 10,000 rows from those reshaped input activations.
130
+
131
+ Hyperparameters. We quantize each layer while performing 100 steps of our method (sufficient for convergence in practice). We finetune the centroids of each layer on the standard ImageNet training set during 2,500 iterations with a batch size of 128 (resp. 64) for the ResNet-18 (resp. ResNet-50) with a learning rate of 0.01, a weight decay of $10^{-4}$ and a momentum of 0.9. For accuracy and memory reasons, the classifier is always quantized with a block size $d = 4$ and $k = 2048$ (resp. $k = 1024$ ) centroids for the ResNet-18 (resp. ResNet-50). Moreover, the first convolutional layer of size $7 \times 7$ is not quantized, as it represents less than $0.1\%$ (resp. $0.05\%$ ) of the weights of a ResNet-18 (resp. ResNet-50).
132
+
133
+ Metrics. We focus on the tradeoff between accuracy and memory. The accuracy is the top-1 error on the standard validation set of ImageNet. The memory footprint is calculated as the indexing cost (number of bits per weight) plus the overhead of storing the centroids in float16. As an example, quantizing a layer of size $128 \times 128 \times 3 \times 3$ with $k = 256$ centroids (1 byte per subvector) and a block size of $d = 9$ leads to an indexing cost of $16\mathrm{kB}$ for $m = 16,384$ blocks plus the cost of storing the centroids of $4.5\mathrm{kB}$ .
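The worked example above can be reproduced with a short helper (assuming ceil(log2 k) bits per subvector index and float16 centroids):

```python
import math

def pq_layer_size_bytes(c_out, c_in, k_h, k_w, d, k):
    n_subvectors = c_out * c_in * k_h * k_w // d
    index_bytes = n_subvectors * math.ceil(math.log2(k)) / 8   # indexing cost
    codebook_bytes = k * d * 2                                  # float16 centroids
    return index_bytes + codebook_bytes

# 128 x 128 x 3 x 3 layer, d = 9, k = 256: 16384 + 4608 bytes (16 kB + 4.5 kB).
print(pq_layer_size_bytes(128, 128, 3, 3, d=9, k=256))
```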
134
+
135
+ # 4.2 IMAGE CLASSIFICATION RESULTS
136
+
137
+ We report below the results of our method applied to various ResNet models. First, we compare our method with the state of the art on the standard ResNet-18 and ResNet-50 architecture. Next, we show the potential of our approach on a competitive ResNet-50. Finally, an ablation study validates the pertinence of our method.
138
+
139
+ Vanilla ResNet-18 and ResNet-50. We evaluate our method on the ImageNet benchmark for ResNet-18 and ResNet-50 architectures and compare our results to the following methods: Trained Ternary Quantization (TTQ) (Zhu et al., 2016), LR-Net (Shayer et al., 2017), ABC-Net (Lin et al., 2017), Binary Weight Network (XNOR-Net or BWN) (Rastegari et al., 2016), Deep Compression (DC) (Han et al., 2016) and Hardware-Aware Automated Quantization (HAQ) (Wang et al., 2018a). We report the accuracies and compression factors in the original papers and/or in the two surveys (Guo, 2018; Cheng et al., 2017) for a given architecture when the result is available. We do not compare our method to DoReFa-Net (Zhou et al., 2016) and WRPN (Mishra et al., 2017) as those approaches also use low-precision activations and hence get lower accuracies, e.g., $51.2\%$ top-1 accuracy for a XNOR-Net with ResNet-18. The results are presented in Figure 4.2. For better readability, some results for our method are also displayed in Table 1. We report the average accuracy and standard deviation over 3 runs. Our method significantly outperforms state of the art papers for
140
+
141
+ ![](images/cbf756c3f7df7b8154c6ff54daf2111274e54972cc40107e8ecd27d7713f21ba.jpg)
142
+ Figure 3: Compression results for ResNet-18 and ResNet-50 architectures. We explore two compression regimes as defined in Section 4.1: small block sizes (block sizes of $d = 4$ and 9) and large block sizes (block sizes $d = 8$ and 18). The results of our method for $k = 256$ centroids are of practical interest as they correspond to a byte-compatible compression scheme.
143
+
144
+ ![](images/b08501c4f9b543ab725be5bfae4571aa6f1e1b38db25a2908adc02bd099edb0a.jpg)
145
+
146
+ Table 1: Results for vanilla ResNet-18 and ResNet-50 architectures for $k = {256}$ centroids.
147
+
148
+ <table><tr><td>Model (original top-1)</td><td>Compression</td><td>Size ratio</td><td>Model size</td><td>Top-1 (%)</td></tr><tr><td rowspan="2">ResNet-18 (69.76%)</td><td>Small blocks</td><td>29x</td><td>1.54 MB</td><td>65.81 ±0.04</td></tr><tr><td>Large blocks</td><td>43x</td><td>1.03 MB</td><td>61.10 ±0.03</td></tr><tr><td rowspan="2">ResNet-50 (76.15%)</td><td>Small blocks</td><td>19x</td><td>5.09 MB</td><td>73.79 ±0.05</td></tr><tr><td>Large blocks</td><td>31x</td><td>3.19 MB</td><td>68.21 ±0.04</td></tr></table>
149
+
150
+ various operating points. For instance, for a ResNet-18, our method with large blocks and $k = 512$ centroids reaches a larger accuracy than ABC-Net ( $M = 2$ ) with a compression ratio that is 2x larger. Similarly, on the ResNet-50, our compressed model with $k = 256$ centroids in the large blocks setup yields a comparable accuracy to DC (2 bits) with a compression ratio that is 2x larger.
151
+
152
+ The work by Tung & Mori (Tung & Mori, 2018) is likely the only one that remains competitive with ours with a 6.8 MB network after compression, with a technique that prunes the network and therefore implicitly changes the architecture. The authors report the delta accuracy for which we have no direct comparable top-1 accuracy, but their method is arguably complementary to ours.
153
+
154
+ Semi-supervised ResNet-50. Recent works (Mahajan et al., 2018; Yalniz et al., 2019) have demonstrated the possibility of leveraging a large collection of unlabelled images to improve the performance of a given architecture. In particular, Yalniz et al. (Yalniz et al., 2019) use the publicly available YFCC-100M dataset (Thomee et al., 2015) to train a ResNet-50 that reaches $79.1\%$ top-1 accuracy on the standard validation set of ImageNet. In the following, we use this particular model and refer to it as semi-supervised ResNet-50. In the low compression regime (block sizes of 4 and 9), with $k = 256$ centroids (practical for implementation), our compressed semi-supervised ResNet-50 reaches $76.12\%$ top-1 accuracy. In other words, the model compressed to $5\mathrm{MB}$ attains the performance of a vanilla, non-compressed ResNet-50 (vs. $97.5\mathrm{MB}$ for the non-compressed ResNet-50).
155
+
156
+ Comparison for a given size budget. To ensure a fair comparison, we compare our method for a given model size budget against the reference methods in Table 2. It should be noted that our method can further benefit from advances in semi-supervised learning to boost the performance of the non-compressed, and hence of the compressed, network.
157
+
158
+ Ablation study. We perform an ablation study on the vanilla ResNet-18 to study the respective effects of quantizing using the activations and finetuning by distillation (here, finetuning refers both to the per-layer finetuning and to the global finetuning after quantization described in Section 3). We refer to our method as Act + Distill. First, we still finetune by distillation but change the quantization: instead of quantizing using our method (see Equation (2)), we quantize using the standard PQ algorithm, which does not take the activations into account (see Equation (1)). We refer to this method as No act + Distill. Second, we quantize using our method but perform a standard finetuning using
159
+
160
+ Table 2: Best test top-1 accuracy on ImageNet for a given size budget (no architecture constraint).
161
+
162
+ <table><tr><td>Size budget</td><td>Best previous published method</td><td>Ours</td></tr><tr><td>~1 MB</td><td>70.90% (HAQ (Wang et al., 2018a), MobileNet v2)</td><td>64.01% (vanilla ResNet-18)</td></tr><tr><td>~5 MB</td><td>71.74% (HAQ (Wang et al., 2018a), MobileNet v1)</td><td>76.12% (semi-sup.ResNet-50)</td></tr><tr><td>~10 MB</td><td>75.30% (HAQ (Wang et al., 2018a), ResNet-50)</td><td>77.85% (semi-sup.ResNet-50)</td></tr></table>
163
+
164
+ Table 3: Ablation study on ResNet-18 (test top-1 accuracy on ImageNet).
165
+
166
+ <table><tr><td>Compression</td><td>Centroids k</td><td>No act + Distill</td><td>Act + Labels</td><td>Act + Distill (ours)</td></tr><tr><td rowspan="4">Small blocks</td><td>256</td><td>64.76</td><td>65.55</td><td>65.81</td></tr><tr><td>512</td><td>66.31</td><td>66.82</td><td>67.15</td></tr><tr><td>1024</td><td>67.28</td><td>67.53</td><td>67.87</td></tr><tr><td>2048</td><td>67.88</td><td>67.99</td><td>68.26</td></tr><tr><td rowspan="4">Large blocks</td><td>256</td><td>60.46</td><td>61.01</td><td>61.18</td></tr><tr><td>512</td><td>63.21</td><td>63.67</td><td>63.99</td></tr><tr><td>1024</td><td>64.74</td><td>65.48</td><td>65.72</td></tr><tr><td>2048</td><td>65.94</td><td>66.21</td><td>66.50</td></tr></table>
167
+
168
+ the image labels (Act + Labels). The results are displayed in Table 3. Our approach consistently yields significantly better results. As a side note, quantizing all the layers of a ResNet-18 with the standard PQ algorithm and without any finetuning leads to top-1 accuracies below $25\%$ for all operating points, which illustrates the drift in accuracy occurring when compressing deep networks with standard methods (as opposed to our method).
169
+
170
+ # 4.3 IMAGE DETECTION RESULTS
171
+
172
+ To demonstrate the generality of our method, we compress the Mask R-CNN architecture used for image detection in many real-life applications (He et al., 2017). We compress the backbone (ResNet-50 FPN) in the small blocks compression regime and refer the reader to the open-sourced compressed model for the block sizes used in the various heads of the network. We use $k = 256$ centroids for every layer. We perform the fine-tuning (layer-wise and global) using distributed training on 8 V100 GPUs. Results are displayed in Table 4. We argue that this provides an interesting point of comparison for future work aiming at compressing such architectures for various applications.
173
+
174
+ # 5 CONCLUSION
175
+
176
+ We presented a quantization method based on Product Quantization that gives state of the art results on ResNet architectures and that generalizes to other architectures such as Mask R-CNN. Our compression scheme does not require labeled data and the resulting models are byte-aligned, allowing for efficient inference on CPU. Further research directions include testing our method on a wider variety of architectures. In particular, our method can be readily adapted to simultaneously compress and transfer ResNets trained on ImageNet to other domains. Finally, we plan to take the non-linearity into account to improve our reconstruction error.
177
+
178
+ Table 4: Compression results for Mask R-CNN (backbone ResNet-50 FPN) for $k = 256$ centroids (compression factor $26 \times$ ).
179
+
180
+ <table><tr><td>Model</td><td>Size</td><td>Box AP</td><td>Mask AP</td></tr><tr><td>Non-compressed</td><td>170 MB</td><td>37.9</td><td>34.6</td></tr><tr><td>Compressed</td><td>6.51 MB</td><td>33.9</td><td>30.8</td></tr></table>
181
+
182
+ # REFERENCES
183
+
184
+ David Berthelot, Nicholas Carlini, Ian Goodfellow, Nicolas Papernot, Avital Oliver, and Colin Raffel. Mixmatch: A holistic approach to semi-supervised learning. arXiv preprint arXiv:1905.02249, 2019.
185
+ Miguel Á. Carreira-Perpiñán and Yerlan Idelbayev. Model compression as constrained optimization, with application to neural nets. part ii: quantization, 2017.
186
+ Yu Cheng, Duo Wang, Pan Zhou, and Tao Zhang. A survey of model compression and acceleration for deep neural networks. CoRR, 2017.
187
+ Yoojin Choi, Mostafa El-Khamy, and Jungwon Lee. Towards the limit of network quantization. CoRR, 2016.
188
+ Matthieu Courbariaux, Yoshua Bengio, and Jean-Pierre David. Binaryconnect: Training deep neural networks with binary weights during propagations. CoRR, 2015.
189
+ J. Deng, W. Dong, R. Socher, L.-J. Li, K. Li, and L. Fei-Fei. ImageNet: A Large-Scale Hierarchical Image Database. In Conference on Computer Vision and Pattern Recognition, 2009.
190
+ Emily L Denton, Wojciech Zaremba, Joan Bruna, Yann LeCun, and Rob Fergus. Exploiting linear structure within convolutional networks for efficient evaluation. In Advances in Neural Information Processing Systems 27. 2014.
191
+ Tiezheng Ge, Kaiming He, Qifa Ke, and Jian Sun. Optimized product quantization. IEEE Trans. Pattern Anal. Mach. Intell., 2014.
192
+ Yunchao Gong, Liu Liu, Ming Yang, and Lubomir Bourdev. Compressing deep convolutional networks using vector quantization. arXiv preprint arXiv:1412.6115, 2014.
193
+ Yunhui Guo. A survey on methods and theories of quantized neural networks. CoRR, 2018.
194
+ Song Han, Huizi Mao, and William J. Dally. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. International Conference on Learning Representations, 2016.
195
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CoRR, 2015.
196
+ Kaiming He, Georgia Gkioxari, Piotr Dollar, and Ross Girshick. Mask r-cnn. International Conference on Computer Vision (ICCV), 2017.
197
+ Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. Distilling the knowledge in a neural network. NIPS Deep Learning Workshop, 2014.
198
+ Andrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, and Hartwig Adam. Searching for mobilenetv3. arXiv e-prints, 2019.
199
+ Gao Huang, Zhuang Liu, Laurens van der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. Conference on Computer Vision and Pattern Recognition, 2017.
200
+ Forrest Iandola, Song Han, Matthew W. Moskewicz, Khalid Ashraf, William Dally, and Kurt Keutzer. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5 MB model size. CoRR, 2016.
201
+ Herve Jegou, Matthijs Douze, and Cordelia Schmid. Product quantization for nearest neighbor search. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2011.
203
+ Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. In Advances in Neural Information Processing Systems. 2012.
204
+
205
+ Yann LeCun, John S. Denker, and Sara A. Solla. Optimal brain damage. In Advances in Neural Information Processing Systems, 1990.
206
+ Fengfu Li and Bin Liu. Ternary weight networks. CoRR, 2016.
207
+ Xiaofan Lin, Cong Zhao, and Wei Pan. Towards accurate binary convolutional neural network. CoRR, 2017.
208
+ Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan, and Changshui Zhang. Learning efficient convolutional networks through network slimming. International Conference on Computer Vision, 2017.
209
+ Raphael Gontijo Lopes, Stefano Fenu, and Thad Starner. Data-free knowledge distillation for deep neural networks, 2017.
210
+ Jian-Hao Luo, Jianxin Wu, and Weiyao Lin. Thinet: A filter level pruning method for deep neural network compression. CoRR, 2017.
211
+ Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, and Jian Sun. Shufflenet V2: practical guidelines for efficient CNN architecture design. CoRR, 2018.
212
+ Dhruv Mahajan, Ross B. Girshick, Vignesh Ramanathan, Kaiming He, Manohar Paluri, Yixuan Li, Ashwin Bharambe, and Laurens van der Maaten. Exploring the limits of weakly supervised pretraining. CoRR, 2018.
213
+ Mark D. McDonnell. Training wide residual networks for deployment using a single bit for each weight, 2018.
214
+ Asit K. Mishra and Debbie Marr. Apprentice: Using knowledge distillation techniques to improve low-precision network accuracy. CoRR, 2017.
215
+ Asit K. Mishra, Eriko Nurvitadhi, Jeffrey J. Cook, and Debbie Marr. WRPN: wide reduced-precision networks. CoRR, 2017.
216
+ Mohammad Norouzi and David J Fleet. Cartesian k-means. In Conference on Computer Vision and Pattern Recognition, 2013.
217
+ Mohammad Rastegari, Vicente Ordonez, Joseph Redmon, and Ali Farhadi. Xnor-net: Imagenet classification using binary convolutional neural networks. In European Conference on Computer Vision, 2016.
218
+ Mark Sandler, Andrew G. Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. Inverted residuals and linear bottlenecks: Mobile networks for classification, detection and segmentation. CoRR, 2018.
219
+ Oran Shayer, Dan Levi, and Ethan Fetaya. Learning discrete weights using the local reparameterization trick. CoRR, 2017.
220
+ Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks, 2019.
221
+ Bart Thomee, David A. Shamma, Gerald Friedland, Benjamin Elizalde, Karl Ni, Douglas Poland, Damian Borth, and Li-Jia Li. The new data and new challenges in multimedia research. CoRR, 2015.
222
+ Frederick Tung and Greg Mori. Deep neural network compression by in-parallel pruning-quantization. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2018.
223
+ Kuan Wang, Zhijian Liu, Yujun Lin, Ji Lin, and Song Han. HAQ: hardware-aware automated quantization. CoRR, 2018a.
224
+ Kuan Wang, Zhijian Liu, Yujun Lin, Ji Lin, and Song Han. Haq: hardware-aware automated quantization. arXiv preprint arXiv:1811.08886, 2018b.
225
+
226
+ Saining Xie, Ross Girshick, Piotr Dollar, Zhuowen Tu, and Kaiming He. Aggregated residual transformations for deep neural networks. In Conference on Computer Vision and Pattern Recognition, 2017.
227
+ I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification. arXiv e-prints, 2019.
228
+ Xiangyu Zhang, Xinyu Zhou, Mengxiao Lin, and Jian Sun. Shufflenet: An extremely efficient convolutional neural network for mobile devices. CoRR, 2017.
229
+ Aojun Zhou, Anbang Yao, Yiwen Guo, Lin Xu, and Yurong Chen. Incremental network quantization: Towards lossless cnns with low-precision weights. CoRR, 2017.
230
+ Shuchang Zhou, Zekun Ni, Xinyu Zhou, He Wen, Yuxin Wu, and Yuheng Zou. Dorefa-net: Training low bandwidth convolutional neural networks with low bandwidth gradients. CoRR, 2016.
231
+ Chenzhuo Zhu, Song Han, Huizi Mao, and William J. Dally. Trained ternary quantization. CoRR, 2016.
232
+ Barret Zoph, Vijay Vasudevan, Jonathon Shlens, and Quoc V. Le. Learning transferable architectures for scalable image recognition. CoRR, 2017.
andthebitgoesdownrevisitingthequantizationofneuralnetworks/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f315482583c2e55455183dea8f3006ce07a976e06b2d07f26837b3c4008b249a
3
+ size 282172
andthebitgoesdownrevisitingthequantizationofneuralnetworks/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92dfb0fb74441d3d5a5a938e7071c4c84944189b17ece53d1f5a05259065f9b1
3
+ size 392892
anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73ea8a46e96f9ac3149797790a305dd12782875396fc44b7151214350a575b35
3
+ size 201950
anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa0eb1221b83840b44865ab7fd13d8f8a6774e559d9db76eeed8c5de1de34141
3
+ size 235965
anexponentiallearningrateschedulefordeeplearning/ea0ef8fc-140d-46a6-a3d9-e0ab973e218f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85ea839ff1954b22d6bebb78427d3f1ca5951b3c4815db1e57bc98a05b839adf
3
+ size 5287592
anexponentiallearningrateschedulefordeeplearning/full.md ADDED
The diff for this file is too large to render. See raw diff
 
anexponentiallearningrateschedulefordeeplearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05de5f5636f2c95013a349ebb0c9bbf07cdcbe0aa0e49d7c5b3a8d4df248c407
3
+ size 1365350
anexponentiallearningrateschedulefordeeplearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9431e173c8ee74d9032cf1c75e1b637c4a7ed47b07fe125c52f9a04fe727199
3
+ size 1261193
aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df63c8340c8c341efa9e2a47720c5e489c9531c6941b97907d66f13f7f945122
3
+ size 83369
aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4009368ddddfa2d65b67117cba5e85407109d435b2b67f60d82b3fd31a71bd40
3
+ size 100563
aprobabilisticformulationofunsupervisedtextstyletransfer/8a8e36c5-cd0a-4d7b-8c4a-35b8b98e0c08_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0a74e2f27915c118b3c1c75376de07671f0210392ab26022d787e89ddbc78d7
3
+ size 695490
aprobabilisticformulationofunsupervisedtextstyletransfer/full.md ADDED
@@ -0,0 +1,269 @@
 
 
 
 
1
+ # A PROBABILISTIC FORMULATION OF UNSUPERVISED TEXT STYLE TRANSFER
2
+
3
+ Junxian He*, Xinyi Wang*, Graham Neubig
4
+
5
+ Carnegie Mellon University
6
+
7
+ {junxianh,xinyiw1,gneubig}@cs.cmu.edu
8
+
9
+ Taylor Berg-Kirkpatrick
10
+
11
+ University of California San Diego
12
+
13
+ tberg@eng.ucsd.edu
14
+
15
+ # ABSTRACT
16
+
17
+ We present a deep generative model for unsupervised text style transfer that unifies previously proposed non-generative techniques. Our probabilistic approach models non-parallel data from two domains as a partially observed parallel corpus. By hypothesizing a parallel latent sequence that generates each observed sequence, our model learns to transform sequences from one domain to another in a completely unsupervised fashion. In contrast with traditional generative sequence models (e.g. the HMM), our model makes few assumptions about the data it generates: it uses a recurrent language model as a prior and an encoder-decoder as a transduction distribution. While computation of marginal data likelihood is intractable in this model class, we show that amortized variational inference admits a practical surrogate. Further, by drawing connections between our variational objective and other recent unsupervised style transfer and machine translation techniques, we show how our probabilistic view can unify some known non-generative objectives such as backtranslation and adversarial loss. Finally, we demonstrate the effectiveness of our method on a wide range of unsupervised style transfer tasks, including sentiment transfer, formality transfer, word decipherment, author imitation, and related language translation. Across all style transfer tasks, our approach yields substantial gains over state-of-the-art non-generative baselines, including the state-of-the-art unsupervised machine translation techniques that our approach generalizes. Further, we conduct experiments on a standard unsupervised machine translation task and find that our unified approach matches the current state-of-the-art.<sup>1</sup>
18
+
19
+ # 1 INTRODUCTION
20
+
21
+ Text sequence transduction systems convert a given text sequence from one domain to another. These techniques can be applied to a wide range of natural language processing applications such as machine translation (Bahdanau et al., 2015), summarization (Rush et al., 2015), and dialogue response generation (Zhao et al., 2017). In many cases, however, parallel corpora for the task at hand are scarce. Therefore, unsupervised sequence transduction methods that require only non-parallel data are appealing and have been receiving growing attention (Bannard & Callison-Burch, 2005; Ravi & Knight, 2011; Mizukami et al., 2015; Shen et al., 2017; Lample et al., 2018; 2019). This trend is most pronounced in the space of text style transfer tasks where parallel data is particularly challenging to obtain (Hu et al., 2017; Shen et al., 2017; Yang et al., 2018). Style transfer has historically referred to sequence transduction problems that modify superficial properties of text - i.e. style rather than content. We focus on a standard suite of style transfer tasks, including formality transfer (Rao & Tetreault, 2018), author imitation (Xu et al., 2012), word decipherment (Shen et al., 2017), sentiment transfer (Shen et al., 2017), and related language translation (Pourdamghani & Knight, 2017). General unsupervised translation has not typically been considered style transfer, but for the purpose of comparison we also conduct evaluation on this task (Lample et al., 2017).
22
+
23
+ Recent work on unsupervised text style transfer mostly employs non-generative or non-probabilistic modeling approaches. For example, Shen et al. (2017) and Yang et al. (2018) design adversarial discriminators to shape their unsupervised objective - an approach that can be effective, but often introduces training instability. Other work focuses on directly designing unsupervised training objectives by incorporating intuitive loss terms (e.g. backtranslation loss), and demonstrates state-of-the-art performance on unsupervised machine translation (Lample et al., 2018; Artetxe et al., 2019) and style transfer (Lample et al., 2019). However, the space of possible unsupervised objectives is extremely large and the underlying modeling assumptions defined by each objective can only be reasoned about indirectly. As a result, the process of designing such systems is often heuristic.
24
+
25
+ In contrast, probabilistic models (e.g. the noisy channel model (Shannon, 1948)) define assumptions about data more explicitly and allow us to reason about these assumptions during system design. Further, the corresponding objectives are determined naturally by principles of probabilistic inference, reducing the need for empirical search directly in the space of possible objectives. That said, classical probabilistic models for unsupervised sequence transduction (e.g. the HMM or semi-HMM) typically enforce overly strong independence assumptions about data to make exact inference tractable (Knight et al., 2006; Ravi & Knight, 2011; Pourdamghani & Knight, 2017). This has restricted their development and caused their performance to lag behind unsupervised neural objectives on complex tasks. Luckily, in recent years, powerful variational approximation techniques have made it more practical to train probabilistic models without strong independence assumptions (Miao & Blunsom, 2016; Yin et al., 2018). Inspired by this, we take a new approach to unsupervised style transfer.
26
+
27
+ We directly define a generative probabilistic model that treats a non-parallel corpus in two domains as a partially observed parallel corpus. Our model makes few independence assumptions and its true posterior is intractable. However, we show that by using amortized variational inference (Kingma & Welling, 2013), a principled probabilistic technique, a natural unsupervised objective falls out of our modeling approach that has many connections with past work, yet is different from all past work in specific ways. In experiments across a suite of unsupervised text style transfer tasks, we find that the natural objective of our model actually outperforms all manually defined unsupervised objectives from past work, supporting the notion that probabilistic principles can be a useful guide even in deep neural systems. Further, in the case of unsupervised machine translation, our model matches the current state-of-the-art non-generative approach.
28
+
29
+ # 2 UNSUPERVISED TEXT STYLE TRANSFER
30
+
31
+ We first overview text style transfer, which aims to transfer a text (typically a single sentence or a short paragraph – for simplicity we refer to simply “sentences” below) from one domain to another while preserving underlying content. For example, formality transfer (Rao & Tetreault, 2018) is the task of transforming the tone of text from informal to formal without changing its content. Other examples include sentiment transfer (Shen et al., 2017), word decipherment (Knight et al., 2006), and author imitation (Xu et al., 2012). If parallel examples were available from each domain (i.e. the training data is a bitext consisting of pairs of sentences from each domain), supervised techniques could be used to perform style transfer (e.g. attentional Seq2Seq (Bahdanau et al., 2015) and Transformer (Vaswani et al., 2017)). However, for most style transfer problems, only non-parallel corpora (one corpus from each domain) can be easily collected. Thus, work on style transfer typically focuses on the more difficult unsupervised setting where systems must learn from non-parallel data alone.
32
+
33
+ The model we propose treats an observed non-parallel text corpus as a partially observed parallel corpus. Thus, we introduce notation for both observed text inputs and those that we will treat as latent variables. Specifically, we let $X = \{x^{(1)}, x^{(2)}, \dots, x^{(m)}\}$ represent observed data from domain $\mathcal{D}_1$ , while we let $Y = \{y^{(m+1)}, y^{(m+2)}, \dots, y^{(n)}\}$ represent observed data from domain $\mathcal{D}_2$ . Corresponding indices represent parallel sentences. Thus, none of the observed sentences share indices. In our model, we introduce latent sentences to complete the parallel corpus. Specifically, $\bar{X} = \{\bar{x}^{(m+1)}, \bar{x}^{(m+2)}, \dots, \bar{x}^{(n)}\}$ represents the set of latent parallel sentences in $\mathcal{D}_1$ , while $\bar{Y} = \{\bar{y}^{(1)}, \bar{y}^{(2)}, \dots, \bar{y}^{(m)}\}$ represents the set of latent parallel sentences in $\mathcal{D}_2$ . Then the goal of unsupervised text transduction is to infer these latent variables conditioned on the observed non-parallel corpora; that is, to learn $p(\bar{y} | x)$ and $p(\bar{x} | y)$ .
34
+
35
+ ![](images/30ae366257b88b9fe9fd6b165524bb20124f9b225e1ca483ad168f2460e47f10.jpg)
36
+ Figure 1: Proposed graphical model for style transfer via bitext completion. Shaded circles denote the observed variables and unshaded circles denote the latents. The generator is parameterized as an encoder-decoder architecture and the prior on the latent variable is a pretrained language model.
37
+
38
+ # 3 THE DEEP LATENT SEQUENCE MODEL
39
+
40
+ First we present our generative model of bitext, which we refer to as a deep latent sequence model. We then describe unsupervised learning and inference techniques for this model class.
41
+
42
+ # 3.1 MODEL STRUCTURE
43
+
44
+ Directly modeling $p(\bar{y} |x)$ and $p(\bar{x} |y)$ in the unsupervised setting is difficult because we never directly observe parallel data. Instead, we propose a generative model of the complete data that defines a joint likelihood, $p(X,\bar{X},Y,\bar{Y})$ . In order to perform text transduction, the unobserved halves can be treated as latent variables: they will be marginalized out during learning and inferred via posterior inference at test time.
45
+
46
+ Our model assumes that each observed sentence is generated from an unobserved parallel sentence in the opposite domain, as depicted in Figure 1. Specifically, each sentence $x^{(i)}$ in domain $\mathcal{D}_1$ is generated as follows: First, a latent sentence $\bar{y}^{(i)}$ in domain $\mathcal{D}_2$ is sampled from a prior, $p_{\mathcal{D}_2}(\bar{y}^{(i)})$ . Then, $x^{(i)}$ is sampled conditioned on $\bar{y}^{(i)}$ from a transduction model, $p(x^{(i)}|\bar{y}^{(i)})$ . Similarly, each observed sentence $y^{(j)}$ in domain $\mathcal{D}_2$ is generated conditioned on a latent sentence, $\bar{x}^{(j)}$ , in domain $\mathcal{D}_1$ via the opposite transduction model, $p(y^{(j)}|\bar{x}^{(j)})$ , and prior, $p_{\mathcal{D}_1}(\bar{x}^{(j)})$ . We let $\theta_{x| \bar{y}}$ and $\theta_{y| \bar{x}}$ represent the parameters of the two transduction distributions respectively. We assume the prior distributions are pretrained on the observed data in their respective domains and therefore omit their parameters for simplicity of notation. Together, this gives the following joint likelihood:
47
+
48
+ $$
49
+ p \left(X, \bar {X}, Y, \bar {Y}; \theta_ {x | \bar {y}}, \theta_ {y | \bar {x}}\right) = \left(\prod_ {i = 1} ^ {m} p \left(x ^ {(i)} \mid \bar {y} ^ {(i)}; \theta_ {x | \bar {y}}\right) p _ {\mathcal {D} _ {2}} \left(\bar {y} ^ {(i)}\right)\right) \left(\prod_ {j = m + 1} ^ {n} p \left(y ^ {(j)} \mid \bar {x} ^ {(j)}; \theta_ {y | \bar {x}}\right) p _ {\mathcal {D} _ {1}} \left(\bar {x} ^ {(j)}\right)\right) \tag {1}
50
+ $$
51
+
52
+ The log marginal likelihood of the data, which we will approximate during training, is:
53
+
54
+ $$
55
+ \log p (X, Y; \theta_ {x | \bar {y}}, \theta_ {y | \bar {x}}) = \log \sum_ {\bar {X}} \sum_ {\bar {Y}} p (X, \bar {X}, Y, \bar {Y}; \theta_ {x | \bar {y}}, \theta_ {y | \bar {x}}) \tag {2}
56
+ $$
57
+
58
+ Note that if the two transduction models share no parameters, the training problems for each observed domain are independent. Critically, we introduce parameter sharing through our variational inference procedure, which we describe in more detail in Section 3.2.
59
+
60
+ Architecture: Since we would like to be able to model a variety of transfer tasks, we choose a parameterization for our transduction distributions that makes no independence assumptions. Specifically, we employ an encoder-decoder architecture based on the standard attentional Seq2Seq model which has been shown to be successful across various tasks (Bahdanau et al., 2015; Rush et al., 2015). Similarly, our prior distributions for each domain are parameterized as recurrent language models which, again, make no independence assumptions. In contrast, traditional unsupervised generative sequence models typically make strong independence assumptions to enable exact inference (e.g. the HMM makes a Markov assumption on the latent sequence and emissions are one-to-one). Our model is more flexible, but exact inference via dynamic programming will be intractable. We address this problem in the next section.
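+ 
+ For illustration, the following minimal sketch traces this generative story with toy stand-ins for the trained components: a random categorical "language model" plays the role of the prior $p_{\mathcal{D}_2}$ and a random embedding-based distribution plays the role of the attentional Seq2Seq transduction model. All names, vocabulary sizes, and lengths below are arbitrary placeholders rather than our actual architecture.
+ 
+ ```python
+ import torch
+ from torch.distributions import Categorical
+ 
+ torch.manual_seed(0)
+ vocab_d1, vocab_d2, sent_len = 20, 20, 6
+ 
+ def sample_prior_lm(vocab_size, length):
+     # Toy stand-in for the pretrained language-model prior p_D2(y_bar):
+     # i.i.d. categorical draws here; the real prior is a recurrent LM.
+     logits = torch.randn(length, vocab_size)
+     return Categorical(logits=logits).sample()
+ 
+ def sample_transduction(y_bar, out_vocab):
+     # Toy stand-in for the Seq2Seq transduction model p(x | y_bar): output
+     # logits are read off a random embedding of the latent sentence.
+     emb = torch.nn.Embedding(vocab_d2, out_vocab)
+     return Categorical(logits=emb(y_bar)).sample()
+ 
+ # Generative story for one observed D1 sentence:
+ y_bar = sample_prior_lm(vocab_d2, sent_len)   # latent D2 sentence ~ p_D2
+ x = sample_transduction(y_bar, vocab_d1)      # observed D1 sentence ~ p(x | y_bar)
+ print("latent  y_bar:", y_bar.tolist())
+ print("observed x   :", x.tolist())
+ ```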
61
+
62
+ ![](images/ea70c00dd49a21d384a7194db94fc3f2840641fefca3e4a40c32c3adb7c6055c.jpg)
63
+ Figure 2: Depiction of amortized variational approximation. Distributions $q(\bar{y} |x)$ and $q(\bar{x} |y)$ represent inference networks that approximate the model's true posterior. Critically, parameters are shared between the generative model and inference networks to tie the learning problems for both domains.
64
+
65
+ # 3.2 LEARNING
66
+
67
+ Ideally, learning should directly optimize the log data likelihood, which is the marginal of our model shown in Eq. 2. However, due to our model's neural parameterization which does not factorize, computing the data likelihood cannot be accomplished using dynamic programming as can be done with simpler models like the HMM. To overcome the intractability of computing the true data likelihood, we adopt amortized variational inference (Kingma & Welling, 2013) in order to derive a surrogate objective for learning, the evidence lower bound (ELBO) on log marginal likelihood<sup>3</sup>:
68
+
69
+ $$
70
+ \begin{aligned} \log p (X, Y; \theta_{x|\bar{y}}, \theta_{y|\bar{x}}) &\geq \mathcal{L}_{\mathrm{ELBO}}(X, Y; \theta_{x|\bar{y}}, \theta_{y|\bar{x}}, \phi_{\bar{x}|y}, \phi_{\bar{y}|x}) \\ &= \sum_{i} \Big[ \mathbb{E}_{q(\bar{y}|x^{(i)}; \phi_{\bar{y}|x})} \big[ \log p (x^{(i)} | \bar{y}; \theta_{x|\bar{y}}) \big] - D_{\mathrm{KL}}\big( q(\bar{y}|x^{(i)}; \phi_{\bar{y}|x}) \,\|\, p_{\mathcal{D}_2}(\bar{y}) \big) \Big] \\ &\quad + \sum_{j} \Big[ \underbrace{\mathbb{E}_{q(\bar{x}|y^{(j)}; \phi_{\bar{x}|y})} \big[ \log p (y^{(j)} | \bar{x}; \theta_{y|\bar{x}}) \big]}_{\text{Reconstruction likelihood}} - \underbrace{D_{\mathrm{KL}}\big( q(\bar{x}|y^{(j)}; \phi_{\bar{x}|y}) \,\|\, p_{\mathcal{D}_1}(\bar{x}) \big)}_{\text{KL regularizer}} \Big] \end{aligned} \tag{3}
71
+ $$
72
+
73
+ The surrogate objective introduces $q(\bar{y} | x^{(i)}; \phi_{\bar{y} | x})$ and $q(\bar{x} | y^{(j)}; \phi_{\bar{x} | y})$ , which represent two separate inference network distributions that approximate the model's true posteriors, $p(\bar{y} | x^{(i)}; \theta_{x | \bar{y}})$ and $p(\bar{x} | y^{(j)}; \theta_{y | \bar{x}})$ , respectively. Learning operates by jointly optimizing the lower bound over both variational and model parameters. Once trained, the variational posterior distributions can be used directly for style transfer. The KL terms in Eq. 3, that appear naturally in the ELBO objective, can be intuitively viewed as regularizers that use the language model priors to bias the induced sentences towards the desired domains. Amortized variational techniques have been most commonly applied to continuous latent variables, as in the case of the variational autoencoder (VAE) (Kingma & Welling, 2013). Here, we use this approach for inference over discrete sequences, which has been shown to be effective in related work on a semi-supervised task (Miao & Blunsom, 2016).
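+ 
+ As a schematic illustration of Eq. 3, the sketch below estimates its two terms for a single observed $\mathcal{D}_1$ sentence using one sample from the inference network; factorized toy categoricals stand in for the inference network, decoder, and language-model prior, so it only shows how the reconstruction and KL terms combine (all logits and sizes are arbitrary assumptions, not our trained models).
+ 
+ ```python
+ import torch
+ from torch.distributions import Categorical, kl_divergence
+ 
+ torch.manual_seed(0)
+ vocab, length = 50, 8
+ 
+ # Toy per-token distributions standing in for the neural components:
+ q_logits     = torch.randn(length, vocab)       # inference network q(y_bar | x)
+ prior_logits = torch.randn(length, vocab)       # LM prior p_D2(y_bar)
+ x            = torch.randint(vocab, (length,))  # an observed D1 sentence
+ 
+ q = Categorical(logits=q_logits)
+ y_bar = q.sample()                               # one latent sample y_bar ~ q
+ 
+ # Reconstruction term: log p(x | y_bar) under a toy decoder whose logits
+ # are read off an embedding of y_bar.
+ dec_emb = torch.nn.Embedding(vocab, vocab)
+ recon = Categorical(logits=dec_emb(y_bar)).log_prob(x).sum()
+ 
+ # KL term: KL(q || p_D2) summed over tokens, available in closed form
+ # for these factorized toy distributions.
+ kl = kl_divergence(q, Categorical(logits=prior_logits)).sum()
+ 
+ elbo_one_direction = recon - kl
+ print(float(recon), float(kl), float(elbo_one_direction))
+ ```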
74
+
75
+ Inference Network and Parameter Sharing: Note that the approximate posterior on one domain aims to learn the reverse style transfer distribution, which is exactly the goal of the generative distribution in the opposite domain. For example, the inference network $q(\bar{y} | x^{(i)}; \phi_{\bar{y} | x})$ and the generative distribution $p(y | \bar{x}^{(i)}; \theta_{y | \bar{x}})$ both aim to transform $D_1$ to $D_2$ . Therefore, we use the same architecture for each inference network as used in the transduction models, and tie their parameters: $\phi_{\bar{x} | y} = \theta_{x | \bar{y}}, \phi_{\bar{y} | x} = \theta_{y | \bar{x}}$ . This means we learn only two encoder-decoders overall – which are parameterized by $\theta_{x | \bar{y}}$ and $\theta_{y | \bar{x}}$ respectively – to represent two directions of transfer. In addition to reducing the number of learnable parameters, this parameter tying couples the learning problems for both domains and allows us to jointly learn from the full data. Moreover, inspired by recent work that
76
+
77
+ builds a universal Seq2Seq model to translate between different language pairs (Johnson et al., 2017), we introduce further parameter tying between the two directions of transduction: the same encoder is employed for both $x$ and $y$ , and a domain embedding $c$ is provided to the same decoder to specify the transfer direction, as shown in Figure 2. Ablation analysis in Section 5.3 suggests that parameter sharing is important to achieve good performance.
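+ 
+ A minimal sketch of this parameter-tying scheme is given below: a single encoder is applied to sentences from either domain, and a single decoder receives a learned domain embedding indicating the transfer direction. Module names, dimensions, and the way the domain embedding is prepended are illustrative assumptions rather than our exact implementation, and attention is omitted for brevity.
+ 
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ class SharedSeq2Seq(nn.Module):
+     """One encoder and one decoder shared by both transfer directions;
+     the target domain is signalled by a learned domain embedding."""
+     def __init__(self, vocab_size, emb_dim=128, hidden=512, n_domains=2):
+         super().__init__()
+         self.embed = nn.Embedding(vocab_size, emb_dim)
+         self.domain = nn.Embedding(n_domains, emb_dim)
+         self.encoder = nn.LSTM(emb_dim, hidden, batch_first=True)
+         self.decoder = nn.LSTM(emb_dim, hidden, batch_first=True)
+         self.out = nn.Linear(hidden, vocab_size)
+ 
+     def forward(self, src, tgt_in, tgt_domain):
+         _, (h, c) = self.encoder(self.embed(src))
+         # Prepend the domain embedding so the decoder knows which direction
+         # of transfer to perform (attention over encoder states omitted).
+         dom = self.domain(tgt_domain).unsqueeze(1)            # (B, 1, E)
+         dec_in = torch.cat([dom, self.embed(tgt_in)], dim=1)  # (B, 1+T, E)
+         dec_out, _ = self.decoder(dec_in, (h, c))
+         return self.out(dec_out[:, 1:])                       # (B, T, V) logits
+ 
+ model = SharedSeq2Seq(vocab_size=1000)
+ src = torch.randint(1000, (4, 12))
+ tgt = torch.randint(1000, (4, 10))
+ logits = model(src, tgt, torch.tensor([1, 1, 0, 0]))  # per-example target domain
+ print(logits.shape)  # torch.Size([4, 10, 1000])
+ ```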
78
+
79
+ Approximating Gradients of ELBO: The reconstruction and KL terms in Eq. 3 still involve intractable expectations due to the marginalization over the latent sequence, thus we need to approximate their gradients. Gumbel-softmax (Jang et al., 2017) and REINFORCE (Sutton et al., 2000) are often used as stochastic gradient estimators in the discrete case. Since the latent text variables have an extremely large domain, we find that REINFORCE-based gradient estimates result in high variance. Thus, we use the Gumbel-softmax straight-through estimator to backpropagate gradients from the KL terms. However, we find that approximating gradients of the reconstruction loss is much more challenging – both the Gumbel-softmax estimator and REINFORCE are unable to outperform a simple stop-gradient method that does not back-propagate the gradient of the latent sequence to the inference network. This confirms a similar observation in previous work on unsupervised machine translation (Lample et al., 2018). Therefore, we use greedy decoding without recording gradients to approximate the reconstruction term. Note that the inference networks still receive gradients from the prior through the KL term, and their parameters are shared with the decoders which do receive gradients from reconstruction. We consider this to be the best empirical compromise at the moment.
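+ 
+ The sketch below contrasts the two estimators on toy factorized distributions: a straight-through Gumbel-softmax sample lets the (single-sample) KL surrogate send gradients back to the inference network, while a greedy, detached latent sequence blocks reconstruction gradients from reaching it. All logits and shapes are toy placeholders.
+ 
+ ```python
+ import torch
+ import torch.nn.functional as F
+ from torch.distributions import Categorical
+ 
+ torch.manual_seed(0)
+ vocab, length = 30, 5
+ q_logits = torch.randn(length, vocab, requires_grad=True)  # inference network output
+ prior_logits = torch.randn(length, vocab)                  # LM prior logits
+ 
+ # (a) Straight-through Gumbel-softmax for the KL term: the sampled one-hot
+ # y_bar is discrete in the forward pass but passes gradients to q_logits.
+ y_onehot = F.gumbel_softmax(q_logits, tau=1.0, hard=True)
+ log_prior = (y_onehot * F.log_softmax(prior_logits, dim=-1)).sum()
+ entropy = Categorical(logits=q_logits).entropy().sum()
+ kl_surrogate = -entropy - log_prior        # single-sample estimate of Eq. 5
+ kl_surrogate.backward()
+ print("gradient reaches q through KL term:", bool(q_logits.grad.abs().sum() > 0))
+ 
+ # (b) Stop gradient for the reconstruction term: decode greedily and detach,
+ # so the latent sequence fed to the decoder carries no gradient back to q.
+ y_greedy = q_logits.argmax(dim=-1).detach()
+ print("greedy latent (no grad):", y_greedy.tolist(), y_greedy.requires_grad)
+ ```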
80
+
81
+ Initialization. Good initialization is often necessary for successful optimization of unsupervised learning objectives. In preliminary experiments, we find that the encoder-decoder structure has difficulty generating realistic sentences during the initial stages of training, which usually results in a disastrous local optimum. This is mainly because the encoder-decoder is initialized randomly and there is no direct training signal to specify the desired latent sequence in the unsupervised setting. Therefore, we apply a self-reconstruction loss $\mathcal{L}_{\mathrm{rec}}$ during the initial epochs of training. We denote the output of the encoder as $e(\cdot)$ and the decoder distribution as $p_{\mathrm{dec}}$; then
82
+
83
+ $$
84
+ \mathcal{L}_{\mathrm{rec}} = - \alpha \cdot \sum_{i} \log p_{\mathrm{dec}}\big(x^{(i)} \mid e(x^{(i)}), c_{x}\big) - \alpha \cdot \sum_{j} \log p_{\mathrm{dec}}\big(y^{(j)} \mid e(y^{(j)}), c_{y}\big), \tag{4}
85
+ $$
86
+
87
+ $\alpha$ decays from 1.0 to 0.0 linearly in the first $k$ epochs. $k$ is a tunable parameter and usually less than 3 in all our experiments.
88
+
89
+ # 4 CONNECTION TO RELATED WORK
90
+
91
+ Our probabilistic formulation can be connected with recent advances in unsupervised text transduction methods. For example, the back-translation loss (Sennrich et al., 2016) plays an important role in recent unsupervised machine translation (Artetxe et al., 2018; Lample et al., 2018; Artetxe et al., 2019) and unsupervised style transfer systems (Lample et al., 2019). To incorporate the back-translation loss, the source language $x$ is translated to the target language $y$ to form a pseudo-parallel corpus; a translation model from $y$ to $x$ can then be learned on this pseudo bitext just as in the supervised setting. While back-translation has often been explained as a data augmentation technique, in our probabilistic formulation it appears naturally within the ELBO objective as the reconstruction loss term.
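+ 
+ Schematically, back-translation builds the pseudo-parallel corpus as in the sketch below; `translate_x_to_y` is a hypothetical stand-in for the current $x \to y$ model, and the resulting pairs would then be used to train the $y \to x$ direction with an ordinary supervised loss.
+ 
+ ```python
+ def translate_x_to_y(x_sentence):
+     # Hypothetical stand-in for the current x -> y transduction model;
+     # it just tags the sentence so the example runs end to end.
+     return "<y-domain> " + x_sentence
+ 
+ def make_pseudo_parallel(x_corpus):
+     """Back-translation: pair each source sentence with its current model
+     translation, yielding (pseudo-source, target) pairs for training y -> x."""
+     return [(translate_x_to_y(x), x) for x in x_corpus]
+ 
+ x_corpus = ["the food was awful .", "service was slow ."]
+ for pseudo_y, x in make_pseudo_parallel(x_corpus):
+     print(pseudo_y, "->", x)
+ ```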
92
+
93
+ Some previous work has incorporated pretrained language models into neural semi-supervised or unsupervised objectives. He et al. (2016) use the log likelihood of a pretrained language model as the reward to update a supervised machine translation system with policy gradient. Artetxe et al. (2019) utilize a similar idea for unsupervised machine translation. Yang et al. (2018) employ a similar approach, but interpret the LM as an adversary, training the generator to fool the LM. We show how our ELBO objective is connected with these more heuristic LM regularizers by expanding the KL loss term (assuming $x$ is observed):
94
+
95
+ $$
96
+ D _ {\mathrm {K L}} \left(q (\bar {y} | x) \| p _ {\mathcal {D} _ {2}} (\bar {y})\right) = - H _ {q} - \mathbb {E} _ {q} \left[ \log p _ {\mathcal {D} _ {2}} (\bar {y}) \right], \tag {5}
97
+ $$
98
+
99
+ Note that the loss used in previous work does not include the negative entropy term, $-H_{q}$; our objective naturally yields this additional "regularizer", the negative entropy of the transduction distribution. Intuitively, $-H_{q}$ discourages a peaked transduction distribution, preventing the transduction
100
+
101
+ from constantly generating similar sentences to satisfy the language model. In our experiments we show that this additional regularization is important: it helps bypass bad local optima and improves performance. These differences with past work suggest that a probabilistic view of unsupervised sequence transduction can provide helpful guidance in determining effective training objectives.
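+ 
+ A small numeric check of Eq. 5 on a toy categorical distribution makes the decomposition concrete: the KL term equals the negative entropy of $q$ minus the expected log-probability under the prior, and the entropy term is exactly what the heuristic LM regularizers drop (the probability values below are arbitrary).
+ 
+ ```python
+ import math
+ 
+ q = [0.7, 0.2, 0.1]   # toy transduction distribution q(y_bar | x)
+ p = [0.1, 0.3, 0.6]   # toy language-model prior p_D2(y_bar)
+ 
+ kl = sum(qi * math.log(qi / pi) for qi, pi in zip(q, p))
+ neg_entropy = sum(qi * math.log(qi) for qi in q)                    # -H_q
+ expected_log_prior = sum(qi * math.log(pi) for qi, pi in zip(q, p)) # E_q[log p_D2]
+ 
+ print(round(kl, 4))                                # 1.1019
+ print(round(neg_entropy - expected_log_prior, 4))  # 1.1019, identical to the KL
+ ```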
102
+
103
+ # 5 EXPERIMENTS
104
+
105
+ We test our model on five style transfer tasks: sentiment transfer, word substitution decipherment, formality transfer, author imitation, and related language translation. For completeness, we also evaluate on the task of general unsupervised machine translation using standard benchmarks.
106
+
107
+ We compare with the unsupervised machine translation model (UNMT), which recently demonstrated state-of-the-art performance on transfer tasks such as sentiment and gender transfer (Lample et al., 2019). To validate the effect of the negative entropy term in the KL term (Eq. 5), we remove it and train the model with a back-translation loss plus a language model negative log likelihood loss (which we denote as $\mathrm{BT + NLL}$) as an ablation baseline. For each task, we also include strong baseline numbers from related work when available. For our method we select the model with the best validation ELBO, and for UNMT or $\mathrm{BT + NLL}$ we select the model with the best back-translation loss. Complete model configurations and hyperparameters can be found in Appendix A.1.
108
+
109
+ # 5.1 DATASETS AND EXPERIMENT SETUP
110
+
111
+ Word Substitution Decipherment. Word decipherment aims to uncover the plain text behind a corpus that was enciphered via word substitution, where each word in the vocabulary is mapped to a unique type in a cipher dictionary (Dou & Knight, 2012; Shen et al., 2017; Yang et al., 2018). In our formulation, the model is presented with a non-parallel corpus of English plaintext and the ciphertext. We use the data from Yang et al. (2018), which provides 200K sentences from each domain. While previous work (Shen et al., 2017; Yang et al., 2018) controls the difficulty of this task by varying the percentage of words that are ciphered, we directly evaluate on the most difficult version of this task - $100\%$ of the words are ciphered (i.e. no vocabulary sharing between the two domains). We select the model with the best unsupervised reconstruction loss, and evaluate with BLEU score on the test set, which contains 100K parallel sentences. Results are shown in Table 2.
112
+
113
+ Sentiment Transfer. Sentiment transfer is a task of paraphrasing a sentence with a different sentiment while preserving the original content. Evaluation of sentiment transfer is difficult and is still an open research problem (Mir et al., 2019). Evaluation focuses on three aspects: attribute control, content preservation, and fluency. A successful system needs to perform well with respect to all three aspects. We follow prior work by using three automatic metrics (Yang et al., 2018; Lample et al., 2019): classification accuracy, self-BLEU (BLEU of the output with the original sentence as the reference), and the perplexity (PPL) of each system's output under an external language model. We pretrain a convolutional classifier (Kim, 2014) to assess classification accuracy, and use an LSTM language model pretrained on each domain to compute the PPL of system outputs.
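+ 
+ The three automatic metrics can be computed along the lines of the sketch below; `classifier_predict` and `lm_token_nlls` are hypothetical stubs standing in for the pretrained CNN classifier and the external LSTM language model, and corpus BLEU is computed with the sacrebleu package.
+ 
+ ```python
+ import math
+ import sacrebleu
+ 
+ outputs    = ["i love this place .", "the staff was friendly ."]
+ references = ["i really love this place .", "the staff were very friendly ."]
+ 
+ # Attribute control: fraction of outputs labelled with the target sentiment.
+ def classifier_predict(sentence):
+     return 1  # hypothetical stub: always predicts the target class
+ accuracy = sum(classifier_predict(s) for s in outputs) / len(outputs)
+ 
+ # Content preservation: corpus BLEU of the outputs against references
+ # (scoring against the source sentences instead gives self-BLEU).
+ bleu = sacrebleu.corpus_bleu(outputs, [references]).score
+ 
+ # Fluency: perplexity of the outputs under an external LM; the stub returns
+ # per-token negative log-likelihoods.
+ def lm_token_nlls(sentence):
+     return [2.0] * len(sentence.split())
+ nlls = [n for s in outputs for n in lm_token_nlls(s)]
+ ppl = math.exp(sum(nlls) / len(nlls))
+ 
+ print(accuracy, round(bleu, 2), round(ppl, 2))
+ ```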
114
+
115
+ We use the Yelp reviews dataset collected by Shen et al. (2017) which contains 250K negative sentences and 380K positive sentences. We also use a small test set that has 1000 human-annotated parallel sentences introduced in Li et al. (2018). We denote the positive sentiment as domain $\mathcal{D}_1$ and the negative sentiment as domain $\mathcal{D}_2$ . We use Self-BLEU and BLEU to represent the BLEU score of the output against the original sentence and the reference respectively. Results are shown in Table 1.
116
+
117
+ Formality Transfer. Next, we consider the harder task of modifying the formality of a sequence. We use the GYAFC dataset (Rao & Tetreault, 2018), which contains formal and informal sentences from two different domains. In this paper, we use the Entertainment and Music domain, which has about $52\mathrm{K}$ training sentences, 5K development sentences, and 2.5K test sentences. This dataset actually contains parallel data between formal and informal sentences, which we use only for evaluation. We follow the evaluation protocol of the sentiment transfer task and test models on the same three axes. Since the test set is
118
+
119
+ Table 1: Results on the sentiment transfer, author imitation, and formality transfer. We list the PPL of pretrained LMs on the test sets of both domains. We only report Self-BLEU on the sentiment task to compare with existing work.
120
+
121
+ <table><tr><td>Task</td><td>Model</td><td>Acc.</td><td>BLEU</td><td>Self-BLEU</td><td>PPLD1</td><td>PPLD2</td></tr><tr><td rowspan="7">Sentiment</td><td>Test Set</td><td>-</td><td>-</td><td>-</td><td>31.97</td><td>21.87</td></tr><tr><td>Shen et al. (2017)</td><td>79.50</td><td>6.80</td><td>12.40</td><td>50.40</td><td>52.70</td></tr><tr><td>Hu et al. (2017)</td><td>87.70</td><td>-</td><td>65.60</td><td>115.60</td><td>239.80</td></tr><tr><td>Yang et al. (2018)</td><td>83.30</td><td>13.40</td><td>38.60</td><td>30.30</td><td>42.10</td></tr><tr><td>UNMT</td><td>87.17</td><td>16.99</td><td>44.88</td><td>26.53</td><td>35.72</td></tr><tr><td>BT+NLL</td><td>88.36</td><td>12.36</td><td>31.48</td><td>8.75</td><td>12.82</td></tr><tr><td>Ours</td><td>87.90</td><td>18.67</td><td>48.38</td><td>27.75</td><td>35.61</td></tr><tr><td rowspan="4">Author Imitation</td><td>Test Set</td><td>-</td><td>-</td><td>-</td><td>132.95</td><td>85.25</td></tr><tr><td>UNMT</td><td>80.23</td><td>7.13</td><td>-</td><td>40.11</td><td>39.38</td></tr><tr><td>BT+NLL</td><td>76.98</td><td>10.80</td><td>-</td><td>61.70</td><td>65.51</td></tr><tr><td>Ours</td><td>81.43</td><td>10.81</td><td>-</td><td>49.62</td><td>44.86</td></tr><tr><td rowspan="4">Formality</td><td>Test Set</td><td>-</td><td>-</td><td>-</td><td>71.30</td><td>135.50</td></tr><tr><td>UNMT</td><td>78.06</td><td>16.11</td><td>-</td><td>26.70</td><td>10.38</td></tr><tr><td>BT+NLL</td><td>82.43</td><td>8.57</td><td>-</td><td>6.57</td><td>8.21</td></tr><tr><td>Ours</td><td>80.46</td><td>18.54</td><td>-</td><td>22.65</td><td>17.23</td></tr></table>
122
+
123
+ a parallel corpus, we only compute reference BLEU and ignore self-BLEU. We use $\mathcal{D}_1$ to denote formal text, and $\mathcal{D}_2$ to denote informal text. Results are shown in Table 1.
124
+
125
+ Author Imitation. Author imitation is the task of paraphrasing a sentence to match another author's style. The dataset we use is a collection of Shakespeare's plays translated line by line into modern English. It was collected by Xu et al. (2012)<sup>7</sup> and used in prior work on supervised style transfer (Jhamtani et al., 2017). This is a parallel corpus and thus we follow the setting in the formality transfer task. We use $\mathcal{D}_1$ to denote modern English, and $\mathcal{D}_2$ to denote Shakespeare-style English. Results are shown in Table 1.
126
+
127
+ Related Language Translation. Next, we test our method on a challenging related language translation task (Pourdamghani & Knight, 2017; Yang et al., 2018). This task is a natural test bed for unsupervised sequence transduction since the goal is to preserve the meaning of the source sentence while rewriting it into the target language. For our experiments, we choose Bosnian (bs) and Serbian (sr) as the related language pairs. We follow Yang et al. (2018) to report BLEU-1 score on this task since BLEU-4 score is close to zero. Results are shown in Table 2.
128
+
129
+ Unsupervised MT. In order to draw connections with related work on general unsupervised machine translation, we also evaluate on the WMT'16 German-English translation task. This task is substantially more difficult than the style transfer tasks considered so far. We compare with the state-of-the-art UNMT system using the existing implementation from the XLM codebase, $^{8}$ and implement our approach in the same framework with XLM initialization for fair comparison. We train both systems on 5M non-parallel sentences from each language. Results are shown in Table 2.
130
+
131
+ In Table 1 we also list the PPL of the test set under the external LM for both the source and target domains. The PPL of system outputs should be compared to the PPL of the test set itself, because extremely low PPL often indicates that the generated sentences are short or trivial.
132
+
133
+ Table 2: BLEU for decipherment, related language translation (Sr-Bs), and general unsupervised translation (En-De).
134
+
135
+ <table><tr><td>Model</td><td>Decipher</td><td>Sr-Bs</td><td>Bs-Sr</td><td>En-De</td><td>De-En</td></tr><tr><td>Shen et al. (2017)</td><td>50.8</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Yang et al. (2018)</td><td>49.3</td><td>31.0</td><td>33.4</td><td>-</td><td>-</td></tr><tr><td>UNMT</td><td>76.4</td><td>31.4</td><td>33.4</td><td>26.5</td><td>32.2</td></tr><tr><td>BT+NLL</td><td>78.0</td><td>29.6</td><td>31.4</td><td>-</td><td>-</td></tr><tr><td>Ours</td><td>78.4</td><td>36.2</td><td>38.3</td><td>26.9</td><td>32.0</td></tr></table>
136
+
137
+ # 5.2 RESULTS
138
+
139
+ Tables 1 and 2 demonstrate some general trends. First, UNMT is able to outperform other prior methods in unsupervised text style transfer, such as (Yang et al., 2018; Hu et al., 2017; Shen et al., 2017). The performance improvements of UNMT indicate that flexible and powerful
140
+
141
+ architectures are crucial (prior methods generally do not have an attention mechanism). Second, our model achieves comparable classification accuracy to UNMT but outperforms it in all style transfer tasks in terms of the reference-BLEU, which is the most important metric since it directly measures the quality of the final generations against gold parallel data. This indicates that our method is both effective and consistent across many different tasks. Finally, the BT+NLL baseline is sometimes quite competitive, which indicates that the addition of a language model alone can be beneficial. However, our method consistently outperforms the simple BT+NLL method, which indicates the effectiveness of the additional entropy regularizer in Eq. 5 that is the byproduct of our probabilistic formulation.
142
+
143
+ Next, we examine the PPL of the system outputs under pretrained domain LMs, which should be evaluated in comparison with the PPL of the test set itself. For both the sentiment transfer and the formality transfer tasks in Table 1, BT+NLL achieves extremely low PPL, lower than the PPL of the test corpus in the target domain. After a close examination of the output, we find that it contains many repeated and overly simple outputs. For example, the system generates many examples of "I love this place" when transferring negative to positive sentiment (see Appendix A.3 for examples). It is not surprising that such a trivial output has low perplexity, high accuracy, and low BLEU score. On the other hand, our system obtains reasonably competitive PPL, and our approach achieves the highest accuracy and higher BLEU score than the UNMT baseline.
144
+
145
+ # 5.3 FURTHER ABLATIONS AND ANALYSIS
146
+
147
+ Parameter Sharing. We also conducted an experiment on the word substitution decipherment task, where we remove parameter sharing (as explained in Section 3.2) between the two directions of transduction and optimize two separate encoder-decoders instead. We found that the model obtained an extremely low BLEU score and failed to generate meaningful outputs.
148
+
149
+ Performance vs. Domain Divergence. Figure 3 plots the relative improvement of our method over UNMT with respect to the accuracy of a naive Bayes classifier trained to predict the domain of test sentences. Tasks with high classification accuracy likely have more divergent domains. We can see that for decipherment and en-de translation, where the domains have
150
+
151
+ ![](images/d0702c63c86b3029cd11695155b137bb4664da782a79b9fb8c013f5e0f54c567.jpg)
152
+ Figure 3: Improvement over UNMT vs. classification accuracy.
153
+
154
+ different vocabularies and thus are easily distinguished, our method yields a smaller gain over UNMT. This likely indicates that the (discrimination) regularization effect of the LM priors is less important or necessary when the two domains are very different.
155
+
156
+ Why does the proposed model outperform UNMT? Finally, we examine in detail the output of our model and UNMT for the author imitation task. We pick this task because the reference outputs for the test set are provided, aiding analysis. Examples shown in Table 3 demonstrate that UNMT tends to make overly large changes to the source so that the original meaning is lost, while our method is better at preserving the content of the source sentence. Next, we quantitatively examine the outputs from UNMT and our method by comparing the F1 measure of words bucketed by their syntactic tags. We use the open-sourced compare-mt tool (Neubig et al., 2019), and the results are shown in Figure 4. Our system outperforms UNMT in all word categories. In particular, our system is much better at generating nouns, which likely leads to better content preservation.
157
+
158
+ ![](images/de1a551fb93a71c1822b1b01cb59bf1c27babebf1a631a204589adc2d494d018.jpg)
159
+ Figure 4: Word F1 score by POS tag.
160
+
161
+ Table 3: Examples for author imitation task
162
+
163
+ <table><tr><td>Methods</td><td>Shakespeare to Modern</td></tr><tr><td>Source</td><td>Not to his father&#x27;s .</td></tr><tr><td>Reference</td><td>Not to his father&#x27;s house .</td></tr><tr><td>UNMT</td><td>Not to his brother .</td></tr><tr><td>Ours</td><td>Not to his father&#x27;s house .</td></tr><tr><td>Source</td><td>Send thy man away .</td></tr><tr><td>Reference</td><td>Send your man away .</td></tr><tr><td>UNMT</td><td>Send an excellent word .</td></tr><tr><td>Ours</td><td>Send your man away .</td></tr><tr><td>Source</td><td>Why should you fall into so deep an O ?</td></tr><tr><td>Reference</td><td>Why should you fall into so deep a moan ?</td></tr><tr><td>UNMT</td><td>Why should you carry so nicely , but have your legs ?</td></tr><tr><td>Ours</td><td>Why should you fall into so deep a sin ?</td></tr></table>
164
+
165
+ Table 4: Comparison of gradient approximation on the sentiment transfer task.
166
+
167
+ <table><tr><td>Method</td><td>train ELBO↑</td><td>test ELBO↑</td><td>Acc.</td><td>BLEUr</td><td>BLEUs</td><td>PPLD1</td><td>PPLD2</td></tr><tr><td>Sample-based</td><td>-3.51</td><td>-3.79</td><td>87.90</td><td>13.34</td><td>33.19</td><td>24.55</td><td>25.67</td></tr><tr><td>Greedy</td><td>-2.05</td><td>-2.07</td><td>87.90</td><td>18.67</td><td>48.38</td><td>27.75</td><td>35.61</td></tr></table>
168
+
169
+ Table 5: Comparison of gradient propagation method on the sentiment transfer task.
170
+
171
+ <table><tr><td>Method</td><td>train ELBO↑</td><td>test ELBO↑</td><td>Acc.</td><td>BLEUr</td><td>BLEUs</td><td>PPLD1</td><td>PPLD2</td></tr><tr><td>Gumbel Softmax</td><td>-2.96</td><td>-2.98</td><td>81.30</td><td>16.17</td><td>40.47</td><td>22.70</td><td>23.88</td></tr><tr><td>REINFORCE</td><td>-6.07</td><td>-6.48</td><td>95.10</td><td>4.08</td><td>9.74</td><td>6.31</td><td>4.08</td></tr><tr><td>Stop Gradient</td><td>-2.05</td><td>-2.07</td><td>87.90</td><td>18.67</td><td>48.38</td><td>27.75</td><td>35.61</td></tr></table>
172
+
173
+ Greedy vs. Sample-based Gradient Approximation. In our experiments, we use greedy decoding from the inference network to approximate the expectation required by the ELBO, which is a biased estimator. The main purpose of this approach is to reduce the variance of the gradient estimator during training, especially in the early stages when the variance of sample-based approaches is quite high. As an ablation experiment on the sentiment transfer task, we compare greedy and sample-based gradient approximations in terms of both train and test ELBO, as well as the task performance corresponding to the best test ELBO. After the model is fully trained, we find that the sample-based approximation has low variance: with a single sample, the standard deviation of the ELBO is less than 0.3 across 10 different test repetitions. All final reported ELBO values are computed with this approach, regardless of whether the greedy approximation was used during training. The reported ELBO values are the evidence lower bound per word. Results are shown in Table 4, where sampling-based training underperforms on both ELBO and task evaluations.
174
+
175
+ # 5.4 COMPARISON OF GRADIENT PROPAGATION METHODS
176
+
177
+ As noted above, to stabilize the training process, we stop gradients from propagating to the inference network from the reconstruction loss. Does this approach indeed better optimize the actual probabilistic objective (i.e. ELBO) or only indirectly lead to improved task evaluations? In this section we use sentiment transfer as an example task to compare different methods for propagating gradients and evaluate both ELBO and task evaluations.
178
+
179
+ Specifically, we compare three different methods:
180
+
181
+ - Stop Gradient: The gradients from reconstruction loss are not propagated to the inference network. This is the method we use in all previous experiments.
182
+ - Gumbel Softmax (Jang et al., 2017): Gradients from the reconstruction loss are propagated to the inference network with the straight-through Gumbel estimator.
183
+ - REINFORCE (Sutton et al., 2000): Gradients from reconstruction loss are propagated to the inference network with ELBO as a reward function. This method has been used in previous work for semi-supervised sequence generation (Miao & Blunsom, 2016; Yin et al., 2018), but often suffers from instability issues.
184
+
185
+ We report the train and test ELBO along with task evaluations in Table 5, and plot the learning curves on the validation set in Figure 5. While much simpler, the stop-gradient trick produces a better ELBO than Gumbel-Softmax and REINFORCE. This result is counter-intuitive: it suggests that stopping the gradient better optimizes the likelihood objective under our probabilistic formulation than optimization techniques that do propagate gradients. A likely explanation is that, as a gradient estimator, stop-gradient is clearly biased but has substantially reduced variance. Compared with techniques that offer reduced bias but extremely high variance when applied to our model class (which involves discrete sequences as latent variables), stop-gradient leads to better optimization of our objective because it achieves a better balance of bias and variance overall.
186
+
187
+ ![](images/4121c3976c2d9cf2d697705b3f9c3ffd62ad96e0ae8798e53a28752be642a3d7.jpg)
188
+ Figure 5: ELBO on the validation set v.s. the number training steps.
189
+
190
+ # 6 CONCLUSION
191
+
192
+ We propose a probabilistic generative formulation that unifies past work on unsupervised text style transfer. We show that this probabilistic formulation provides a different way to reason about unsupervised objectives in this domain. Our model leads to substantial improvements on five text style transfer tasks, yielding larger gains when the styles considered are more difficult to distinguish.
193
+
194
+ # ACKNOWLEDGEMENT
195
+
196
+ The work of Junxian He and Xinyi Wang is supported by the DARPA GAILA project (award HR00111990063) and the Tang Family Foundation respectively. The authors would like to thank Zichao Yang for helpful feedback about the project.
197
+
198
+ # REFERENCES
199
+
200
+ Mikel Artetxe, Gorka Labaka, Eneko Agirre, and Kyunghyun Cho. Unsupervised neural machine translation. In Proceedings of ICLR, 2018.
201
+ Mikel Artetxe, Gorka Labaka, and Eneko Agirre. An effective approach to unsupervised machine translation. arXiv preprint arXiv:1902.01313, 2019.
202
+ Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. Neural machine translation by jointly learning to align and translate. In Proceedings of ICLR, 2015.
203
+ Colin Bannard and Chris Callison-Burch. Paraphrasing with bilingual parallel corpora. In Proceedings of ACL, 2005.
204
+ Samuel R Bowman, Luke Vilnis, Oriol Vinyals, Andrew Dai, Rafal Jozefowicz, and Samy Bengio. Generating sentences from a continuous space. In Proceedings of CoNLL, 2016.
205
+ Qing Dou and Kevin Knight. Dependency-based decipherment for resource-limited machine translation. Proceedings of EMNLP, 2012.
206
+ Di He, Yingce Xia, Tao Qin, Liwei Wang, Nenghai Yu, Tie-Yan Liu, and Wei-Ying Ma. Dual learning for machine translation. In Proceedings of NeurIPS, 2016.
207
+ Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, and Eric P Xing. Toward controlled generation of text. In Proceedings of ICML, 2017.
208
+ Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. In Proceedings of ICLR, 2017.
209
+ Harsh Jhamtani, Varun Gangal, Edward Hovy, and Eric Nyberg. Shakespearizing modern language using copy-enriched sequence-to-sequence models. Proceedings of EMNLP, 2017.
210
+
211
+ Melvin Johnson, Mike Schuster, Quoc V Le, Maxim Krikun, Yonghui Wu, Zhifeng Chen, Nikhil Thorat, Fernanda Viégas, Martin Wattenberg, Greg Corrado, et al. Google's multilingual neural machine translation system: Enabling zero-shot translation. Transactions of the Association for Computational Linguistics, 2017.
212
+ Yoon Kim. Convolutional neural networks for sentence classification. In Proceedings of EMNLP, 2014.
213
+ Diederik P Kingma and Max Welling. Auto-encoding variational bayes. arXiv preprint arXiv:1312.6114, 2013.
214
+ Kevin Knight, Anish Nair, Nishit Rathod, and Kenji Yamada. Unsupervised analysis for decipherment problems. In Proceedings of the COLING/ACL 2006 Main Conference Poster Sessions, pp. 499-506, 2006.
215
+ Guillaume Lample, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. Unsupervised machine translation using monolingual corpora only. arXiv preprint arXiv:1711.00043, 2017.
216
+ Guillaume Lample, Myle Ott, Alexis Conneau, Ludovic Denoyer, and Marc'Aurelio Ranzato. Phrase-based & neural unsupervised machine translation. arXiv preprint arXiv:1804.07755, 2018.
217
+ Guillaume Lample, Sandeep Subramanian, Eric Smith, Ludovic Denoyer, Marc'Aurelio Ranzato, and Y-Lan Boureau. Multiple-attribute text rewriting. In Proceedings of ICLR, 2019.
218
+ Juncen Li, Robin Jia, He He, and Percy Liang. Delete, retrieve, generate: A simple approach to sentiment and style transfer. arXiv preprint arXiv:1804.06437, 2018.
219
+ Yishu Miao and Phil Blunsom. Language as a latent variable: Discrete generative models for sentence compression. In Proceedings of EMNLP, 2016.
220
+ Ronen Mir, Bjarke Felbo, Nick Obradovich, and Iyad Rahwan. Evaluating style transfer for text. In Proceedings of NAACL, 2019.
221
+ Masahiro Mizukami, Graham Neubig, Sakriani Sakti, Tomoki Toda, and Satoshi Nakamura. Linguistic individuality transformation for spoken language. In *Natural Language Dialog Systems and Intelligent Assistants*. 2015.
222
+ Graham Neubig, Zi-Yi Dou, Junjie Hu, Paul Michel, Danish Pruthi, and Xinyi Wang. compare-mt: A tool for holistic comparison of language generation systems. In *Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL) Demo Track*, Minneapolis, USA, June 2019. URL http://arxiv.org/abs/1903.07926.
223
+ Nima Pourdamghani and Kevin Knight. Deciphering related languages. Proceedings of EMNLP, 2017.
224
+ Sudha Rao and Joel Tetreault. Dear sir or madam, may i introduce the gyafc dataset: Corpus, benchmarks and metrics for formality style transfer. arXiv preprint arXiv:1803.06535, 2018.
225
+ Sujith Ravi and Kevin Knight. Deciphering foreign language. In Proceedings of ACL, 2011.
226
+ Alexander M Rush, Sumit Chopra, and Jason Weston. A neural attention model for abstractive sentence summarization. In Proceedings of EMNLP, 2015.
227
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. Improving neural machine translation models with monolingual data. In Proceedings of ACL, 2016.
228
+ Claude Elwood Shannon. A mathematical theory of communication. Bell system technical journal, 27(3):379-423, 1948.
229
+ Tianxiao Shen, Tao Lei, Regina Barzilay, and Tommi Jaakkola. Style transfer from non-parallel text by cross-alignment. In Proceedings of NIPS, 2017.
230
+ Richard S Sutton, David A McAllester, Satinder P Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In Proceedings of NeurIPS, 2000.
231
+
232
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Proceedings of NeurIPS, 2017.
233
+ Wei Xu, Alan Ritter, William B. Dolan, Ralph Grishman, and Colin Cherry. Paraphrasing for style. COLING, 2012.
234
+ Zichao Yang, Zhiting Hu, Chris Dyer, Eric P Xing, and Taylor Berg-Kirkpatrick. Unsupervised text style transfer using language models as discriminators. In Proceedings of NeurIPS, 2018.
235
+ Pengcheng Yin, Chunting Zhou, Junxian He, and Graham Neubig. Structvae: Tree-structured latent variable models for semi-supervised semantic parsing. In Proceedings of ACL, 2018.
236
+ Tiancheng Zhao, Ran Zhao, and Maxine Eskenazi. Learning discourse-level diversity for neural dialog models using conditional variational autoencoders. In Proceedings of ACL, 2017.
237
+
238
+ # A APPENDIX
239
+
240
+ # A.1 MODEL CONFIGURATIONS.
241
+
242
+ We adopt the following attentional encoder-decoder architecture for UNMT, BT+NLL, and our method across all the experiments:
243
+
244
+ - We use word embeddings of size 128.
245
+ - We use 1 layer LSTM with hidden size of 512 as both the encoder and decoder.
246
+ - We apply dropout to the readout states before softmax with a rate of 0.3.
247
+
248
+ - Following Lample et al. (2019), we add a max pooling operation over the encoder hidden states before feeding them to the decoder. Intuitively, the pooling window size controls how much information is preserved during transduction: a window size of 1 is equivalent to the standard attention mechanism, and a large window size corresponds to no attention (see the sketch after this list). See Appendix A.2 for how the window size is selected.
249
+ - The UNMT baseline uses a noise function in its denoising autoencoder loss (Lample et al., 2017; 2019), which is critical for its success. We use the default noise function and noise hyperparameters from Lample et al. (2017) when running the UNMT model. For BT+NLL and our method, we found that adding the extra noise to the self-reconstruction loss (Eq. 4) is only helpful when the two domains are relatively divergent (the decipherment and related language translation tasks), where the language models play a less important role. Therefore, we add the default noise from UNMT to Eq. 4 for the decipherment and related language translation tasks only, and do not use any noise for the sentiment, author imitation, and formality tasks.
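+ 
+ The pooling operation referred to above can be sketched as follows: the encoder hidden states are max-pooled over the time dimension with window size $w$ before the decoder attends to them, so $w=1$ leaves the states unchanged and larger $w$ compresses what the decoder can attend to. Shapes and sizes below are illustrative.
+ 
+ ```python
+ import torch
+ import torch.nn.functional as F
+ 
+ def pool_encoder_states(enc_states, window):
+     """Max-pool encoder hidden states over time.
+     enc_states: (batch, time, hidden). window=1 is a no-op (standard attention);
+     larger windows reduce how much information the decoder can attend to."""
+     x = enc_states.transpose(1, 2)                          # (batch, hidden, time)
+     x = F.max_pool1d(x, kernel_size=window, stride=window)
+     return x.transpose(1, 2)                                # (batch, time // window, hidden)
+ 
+ enc = torch.randn(2, 10, 512)
+ print(pool_encoder_states(enc, 1).shape)   # torch.Size([2, 10, 512])
+ print(pool_encoder_states(enc, 5).shape)   # torch.Size([2, 2, 512])
+ ```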
250
+
251
+ # A.2 HYPERPARAMETER TUNING.
252
+
253
+ We vary the pooling window size over $\{1,5\}$ and the decay hyperparameter $k$ for the self-reconstruction loss (Eq. 4) over $\{1,2,3\}$ . For the baselines UNMT and BT+NLL, we also try the option of not annealing the self-reconstruction loss at all, as in the unsupervised machine translation task (Lample et al., 2018). We vary the weight $\lambda$ for the NLL term (BT+NLL) or the KL term (our method) over $\{0.001,0.01,0.03,0.05,0.1\}$ .
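+ 
+ This grid amounts to a small sweep; a minimal sketch is given below, where `train_and_validate` is a hypothetical placeholder for a full training run and model selection follows the criteria described in Section 5.
+ 
+ ```python
+ from itertools import product
+ 
+ pool_windows = [1, 5]
+ ks           = [1, 2, 3]                        # decay epochs for Eq. 4
+ lambdas      = [0.001, 0.01, 0.03, 0.05, 0.1]   # weight on the KL (or NLL) term
+ 
+ for window, k, lam in product(pool_windows, ks, lambdas):
+     config = {"pool_window": window, "rec_decay_epochs": k, "kl_weight": lam}
+     # train_and_validate(config)  # hypothetical training call; select by val ELBO
+     print(config)
+ ```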
254
+
255
+ # A.3 SENTIMENT TRANSFER EXAMPLE OUTPUTS
256
+
257
+ We list some examples of the sentiment transfer task in Table 6. Notably, the BT+NLL method tends to produce extremely short and simple sentences.
258
+
259
+ # A.4 REPETITIVE EXAMPLES OF BT+NLL
260
+
261
+ In Section 5 we mentioned that the baseline $\mathrm{BT + NLL}$ has a low perplexity for some tasks because it tends to generate overly simple and repetitive sentences. From Table 1 we see that two representative tasks are sentiment transfer and formality transfer. Appendix A.3 shows some examples for sentiment transfer; next, we show some repetitive samples of $\mathrm{BT + NLL}$ in Table 7.
262
+
263
+ Table 6: Random Sentiment Transfer Examples
264
+
265
+ <table><tr><td>Methods</td><td>negative to positive</td></tr><tr><td>Original</td><td>the cake portion was extremely light and a bit dry .</td></tr><tr><td>UNMT</td><td>the cake portion was extremely light and a bit spicy .</td></tr><tr><td>BT+NLL</td><td>the cake portion was extremely light and a bit dry .</td></tr><tr><td>Ours</td><td>the cake portion was extremely light and a bit fresh .</td></tr><tr><td>Original</td><td>the “ chicken ” strip were paper thin oddly flavored strips .</td></tr><tr><td>UNMT</td><td>the “ chicken ” were extra crispy noodles were fresh and incredible .</td></tr><tr><td>BT+NLL</td><td>the service was great .</td></tr><tr><td>Ours</td><td>the “ chicken ” strip were paper sweet &amp; juicy flavored .</td></tr><tr><td>Original</td><td>if i could give them a zero star review i would !</td></tr><tr><td>UNMT</td><td>if i could give them a zero star review i would !</td></tr><tr><td>BT+NLL</td><td>i love this place .</td></tr><tr><td>Ours</td><td>i love the restaurant and give a great review i would !</td></tr><tr><td></td><td>positive to negative</td></tr><tr><td>Original</td><td>great food , staff is unbelievably nice .</td></tr><tr><td>UNMT</td><td>no , food is n’t particularly friendly .</td></tr><tr><td>BT+NLL</td><td>i will not be back .</td></tr><tr><td>Ours</td><td>no apologies , staff is unbelievably poor .</td></tr><tr><td>Original</td><td>my wife and i love coming here !</td></tr><tr><td>UNMT</td><td>my wife and i do n’t come here !</td></tr><tr><td>BT+NLL</td><td>i will not be back .</td></tr><tr><td>Ours</td><td>my wife and i walked out the last time .</td></tr><tr><td>Original</td><td>my wife and i love coming here !</td></tr><tr><td>UNMT</td><td>my wife and i do n’t come here !</td></tr><tr><td>BT+NLL</td><td>i will not be back .</td></tr><tr><td>Ours</td><td>my wife and i walked out the last time .</td></tr><tr><td>Original</td><td>the premier hookah lounge of las vegas !</td></tr><tr><td>UNMT</td><td>the worst museum of las vegas !</td></tr><tr><td>BT+NLL</td><td>the worst frame shop of las vegas !</td></tr><tr><td>Ours</td><td>the hallways scam lounge of las vegas !</td></tr></table>
266
+
267
+ Table 7: Repetitive examples of BT+NLL baseline on Formality transfer.
268
+
269
+ <table><tr><td>Original</td><td>Transferred</td></tr><tr><td colspan="2">formal to informal</td></tr><tr><td>I like Rhythm and Blue music .</td><td>I like her and I don’t know .</td></tr><tr><td>There’s nothing he needs to change .</td><td>I don’t know , but I don’t know .</td></tr><tr><td>I enjoy watching my companion attempt to role @-@ play with them .</td><td>I don’t know , but I don’t know .</td></tr><tr><td>I am watching it right now .</td><td>I don’t know , but I don’t know .</td></tr><tr><td>That is the key point , that you fell asleep .</td><td>I don’t know , but I don’t know .</td></tr><tr><td colspan="2">informal to formal</td></tr><tr><td>its a great source just download it .</td><td>I do not know , but I do not know .</td></tr><tr><td>Happy Days , it was the coolest !</td><td>I do not know , but I do not know .</td></tr><tr><td>I used to play flute but once I started sax , I got hooked .</td><td>I do not know , but I do not know .</td></tr><tr><td>The word you are looking for is ...... strengths</td><td>The word you are looking for is : )</td></tr><tr><td>Plus you can tell she really cared about her crew .</td><td>Plus you can tell she really cared about her crew .</td></tr></table>
aprobabilisticformulationofunsupervisedtextstyletransfer/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25da04321ec826b1e850208acdddc75a4ff469533e9d9d826f0d83211ee8e7d4
3
+ size 524577
aprobabilisticformulationofunsupervisedtextstyletransfer/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3bfdae4b7f1c25c1b424699b9817f998f94c0ef4474b8aa4b5a36816036d56e
3
+ size 367119
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea920f6c568ceb995d2d42d2aef888b30d949c9a3f7ee722e2b6bbeb4b15b96c
3
+ size 111014
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:adc542b0a3f414bfc6f44324fd05119862da595804e76f1759d32576408d4b0f
3
+ size 125206
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/fb887d1b-cf6a-4d54-b0a8-375a2f81ac23_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8bd6603cdece778e5a7092c63744b1a8aa6c6049f05cde91887ecfa1e5fb03f
3
+ size 2238956
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/full.md ADDED
@@ -0,0 +1,421 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A SIGNAL PROPAGATION PERSPECTIVE FOR PRUNING NEURAL NETWORKS AT INITIALIZATION
2
+
3
+ Namhoon Lee $^{1}$ , Thalaiyasingam Ajanthan $^{2}$ , Stephen Gould $^{2}$ , Philip H. S. Torr $^{1}$
4
+
5
+ <sup>1</sup>University of Oxford
6
+
7
+ $^{2}$ Australian National University
8
+
9
+ 1{namhoon,phst}@robots.ox.ac.uk
10
+
11
+ 2{thalaiyasingam.ajanthan,stephen.gould}@anu.edu.au
12
+
13
+ # ABSTRACT
14
+
15
+ Network pruning is a promising avenue for compressing deep neural networks. A typical approach to pruning starts by training a model and then removing redundant parameters while minimizing the impact on what is learned. Alternatively, a recent approach shows that pruning can be done at initialization prior to training, based on a saliency criterion called connection sensitivity. However, it remains unclear exactly why pruning an untrained, randomly initialized neural network is effective. In this work, by noting connection sensitivity as a form of gradient, we formally characterize initialization conditions to ensure reliable connection sensitivity measurements, which in turn yields effective pruning results. Moreover, we analyze the signal propagation properties of the resulting pruned networks and introduce a simple, data-free method to improve their trainability. Our modifications to the existing pruning at initialization method lead to improved results on all tested network models for image classification tasks. Furthermore, we empirically study the effect of supervision for pruning and demonstrate that our signal propagation perspective, combined with unsupervised pruning, can be useful in various scenarios where pruning is applied to non-standard arbitrarily-designed architectures.
16
+
17
+ # 1 INTRODUCTION
18
+
19
+ Deep learning has made great strides in machine learning and been applied to various fields from computer vision and natural language processing, to health care and playing games (LeCun et al., 2015). Despite the immense success, however, it remains challenging to deal with the excessive computational and memory requirements of large neural network models. To this end, lightweight models are often preferred, and network pruning, a technique to reduce parameters in a network, has been widely employed to compress deep neural networks (Han et al., 2016). Nonetheless, designing pruning algorithms has been often purely based on ad-hoc intuition lacking rigorous underpinning, partly because pruning was typically carried out after training the model as a post-processing step or interwoven with the training procedure, without adequate tools to analyze.
20
+
21
+ Recently, Lee et al. (2019) have shown that pruning can be done on randomly initialized neural networks in a single-shot prior to training (i.e., pruning at initialization). They empirically showed that as long as the initial random weights are drawn from appropriately scaled Gaussians (e.g., Glorot & Bengio (2010)), their pruning criterion called connection sensitivity can be used to prune deep neural networks, often to an extreme level of sparsity while maintaining good accuracy once trained. However, it remains unclear as to why pruning at initialization is effective, how it should be understood theoretically and whether it can be extended further.
22
+
23
+ In this work, we first look into the effect of initialization on pruning, and find that initial weights have critical impact on connection sensitivity, and therefore, pruning results. Deeper investigation shows that connection sensitivity is determined by an interplay between gradients and weights. Therefore when the initial weights are not chosen appropriately, the propagation of input signals into layers of
24
+
25
+ these random weights can result in saturating error signals (i.e., gradients) under backpropagation, and hence unreliable connection sensitivity, potentially leading to a catastrophic pruning failure.
26
+
27
+ This result leads us to develop a signal propagation perspective for pruning at initialization, and to provide a formal characterization of how a network needs to be initialized for reliable connection sensitivity measurements and in turn effective pruning. Precisely, we show that a sufficient condition to ensure faithful<sup>1</sup> connection sensitivity is layerwise dynamical isometry, which is defined as all singular values of the layerwise Jacobians being concentrated around 1. Our signal propagation perspective is inspired by the recent literature on dynamical isometry and mean field theory (Saxe et al., 2014; Poole et al., 2016; Schoenholz et al., 2017; Pennington et al., 2017), in which the general signal propagation in neural networks is studied. We extend this result to understanding and improving pruning at initialization.
28
+
29
+ Moreover, we study signal propagation in the pruned sparse networks and its effect on trainability. We find that pruning neural networks can indeed break dynamical isometry, and hence hinder signal propagation and degrade the training performance of the resulting sparse network. In order to address this issue, we propose a simple, yet effective data-free method to recover the layerwise orthogonality given the sparse topology, which in turn improves the training performance of the compressed network significantly. Our analysis further reveals that in addition to signal propagation, the choice of pruning method and sparsity level can influence trainability in sparse neural networks.
30
+
31
+ Perfect layerwise dynamical isometry cannot always be ensured in modern networks that have components such as ReLU nonlinearities (Pennington et al., 2017) and/or batch normalization (Yang et al., 2019). Even in such cases, however, our experiments on various modern architectures (including convolutional and residual neural networks) indicate that connection sensitivity computed based on layerwise dynamical isometry is robust and consistently outperforms pruning based on other initialization schemes. This indicates that the signal propagation perspective is not only important for theoretically understanding pruning at initialization, but it also improves the results of pruning for a range of networks of practical interest.
32
+
33
+ Furthermore, this signal propagation perspective for pruning poses another important question: how informative is the error signal computed on randomly initialized networks, or can we prune neural networks even without supervision? To understand this, we compute connection sensitivity scores with different unsupervised surrogate losses and evaluate the pruning results. Interestingly, our results indicate that we can in fact prune networks in an unsupervised manner to extreme sparsity levels without compromising accuracy, and it often compares competitively to pruning with supervision. Moreover, we test if pruning at initialization can be extended to obtain architectures that yield better performance than standard pre-designed architectures with the same number of parameters. In fact, this process, which we call neural architecture sculpting, compares favorably against hand-designed architectures, taking network pruning one step further towards neural architecture search.
34
+
35
+ # 2 PRELIMINARIES
36
+
37
+ Pruning at initialization. The principle behind conventional approaches for network pruning is to find unnecessary parameters, such that by eliminating them the complexity of the model is reduced while minimizing the impact on what is learned (Reed, 1993). Naturally, a typical pruning algorithm starts after convergence to a minimum or training to some degree. This pretraining requirement has been left unattended until Lee et al. (2019) recently showed that pruning can be performed on untrained networks at initialization prior to training. They proposed a method called SNIP which relies on a new saliency criterion, namely connection sensitivity, defined as follows:
38
+
39
+ $$
40
+ s_j(\mathbf{w}; \mathcal{D}) = \frac{\left| g_j(\mathbf{w}; \mathcal{D}) \right|}{\sum_{k=1}^{m} \left| g_k(\mathbf{w}; \mathcal{D}) \right|}, \quad \text{where} \quad g_j(\mathbf{w}; \mathcal{D}) = \left. \frac{\partial L(\mathbf{c} \odot \mathbf{w}; \mathcal{D})}{\partial c_j} \right|_{\mathbf{c} = \mathbf{1}}. \tag{1}
41
+ $$
42
+
43
+ Here, $s_j$ is the saliency of parameter $j$, $\mathbf{w} \in \mathbb{R}^m$ denotes the network parameters, $\mathbf{c} \in \{0,1\}^m$ denotes the auxiliary indicator variables representing the connectivity of the network parameters, $m$ is the total number of parameters in the network, and $\mathcal{D}$ is a given dataset. Also, $g_j$ is the derivative of the loss $L$ with respect to $c_j$, which turns out to be an infinitesimal approximation of the change in the
44
+
45
+ loss with respect to removing the parameter $j$ . Designed to be computed at initialization, pruning is performed by keeping top- $\kappa$ (where $\kappa$ denotes a desired sparsity level) salient parameters based on the above sensitivity scores.
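+
+ To make the criterion concrete, below is a minimal PyTorch-style sketch of computing connection sensitivity scores on one batch and keeping the top-$\kappa$ parameters; the toy network, batch shapes, and the `keep_ratio` argument are illustrative assumptions rather than the exact setup of SNIP.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def connection_sensitivity_mask(model, x, y, keep_ratio=0.1):
+     """Compute s_j (Eq. 1) on one batch and return per-layer binary masks
+     that keep the top-kappa most salient weights."""
+     weights = [p for p in model.parameters() if p.dim() > 1]  # weight matrices only
+     loss = F.cross_entropy(model(x), y)
+     grads = torch.autograd.grad(loss, weights)
+
+     # dL/dc_j = (dL/dw_j) * w_j; the sensitivity is its normalized magnitude.
+     saliency = [(g * w).abs() for g, w in zip(grads, weights)]
+     all_scores = torch.cat([s.flatten() for s in saliency])
+     norm = all_scores.sum()
+
+     k = max(1, int(keep_ratio * all_scores.numel()))
+     threshold = torch.topk(all_scores / norm, k).values[-1]
+     return [(s / norm >= threshold).float() for s in saliency]
+
+ # Toy usage on a small tanh MLP with random data (shapes are illustrative).
+ model = nn.Sequential(nn.Linear(784, 100), nn.Tanh(),
+                       nn.Linear(100, 100), nn.Tanh(),
+                       nn.Linear(100, 10))
+ x, y = torch.randn(128, 784), torch.randint(0, 10, (128,))
+ masks = connection_sensitivity_mask(model, x, y, keep_ratio=0.1)
+ ```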
46
+
47
+ Dynamical isometry and mean field theory. The success of training deep neural networks is due in large part to the initial weights (Hinton & Salakhutdinov, 2006; Glorot & Bengio, 2010; Pascanu et al., 2013). In essence, the principle behind these random weight initializations is to have the mean squared singular value of a network's input-output Jacobian close to 1, so that on average an error vector preserves its norm under backpropagation; however, this is not sufficient to prevent amplification or attenuation of an error vector in the worst case. A stronger condition, requiring as many singular values as possible to be near 1, is called dynamical isometry (Saxe et al., 2014). Under this condition, error signals backpropagate isometrically through the network, approximately preserving their norms and all angles between error vectors. Alongside dynamical isometry, mean field theory is used to develop a theoretical understanding of signal propagation in neural networks with random parameters (Poole et al., 2016). Precisely, the mean field approximation states that the pre-activations of wide, untrained neural networks can be captured by a Gaussian distribution. Recent works revealed a maximum depth through which signals can propagate at initialization, and verified that networks are trainable when signals can travel all the way through them (Schoenholz et al., 2017; Yang & Schoenholz, 2017; Xiao et al., 2018).
48
+
49
+ # 3 SIGNAL PROPAGATION PERSPECTIVE TO PRUNING RANDOM NETWORKS
50
+
51
+ Problem setup. Consider a fully-connected, feed-forward neural network with weight matrices $\mathbf{W}^l\in \mathbb{R}^{N\times N}$ , biases $\mathbf{b}^l\in \mathbb{R}^N$ , pre-activations $\mathbf{h}^l\in \mathbb{R}^N$ , and post-activations $\mathbf{x}^l\in \mathbb{R}^N$ , for $l\in \{1\dots K\}$ up to $K$ layers. Now, the feed-forward dynamics of a network can be written as,
52
+
53
+ $$
54
+ \mathbf {x} ^ {l} = \phi \left(\mathbf {h} ^ {l}\right), \quad \mathbf {h} ^ {l} = \mathbf {W} ^ {l} \mathbf {x} ^ {l - 1} + \mathbf {b} ^ {l}, \tag {2}
55
+ $$
56
+
57
+ where $\phi : \mathbb{R} \to \mathbb{R}$ is an elementwise nonlinearity, and the input is denoted by $\mathbf{x}^0$ . Given the network configuration, the parameters are initialized by sampling from a probability distribution, typically a zero mean Gaussian with scaled variance (LeCun et al., 1998; Glorot & Bengio, 2010).
58
+
59
+ # 3.1 EFFECT OF INITIALIZATION ON PRUNING
60
+
61
+ It is observed in Lee et al. (2019) that pruning results tend to improve when initial weights are drawn from a scaled Gaussian, or so-called variance scaling initialization (LeCun et al., 1998; Glorot & Bengio, 2010; He et al., 2015). As we wish to better understand the role of these random initial weights in pruning, we will examine the effect of varying initialization on the pruning results.
62
+
63
+ In essence, variance scaling schemes introduce normalization factors to adjust the variance $\sigma$ of the weight sampling distribution, which can be summarized as $\sigma \rightarrow \frac{\alpha}{\psi_l}\sigma$, where $\psi_l$ is a layerwise scalar that depends on an architecture specification such as the number of output neurons in the previous layer (e.g., fan-in), and $\alpha$ is a global scalar throughout the network. Notice that, in the case of a network with layers of the same width, the variance can be controlled by a single scalar $\gamma = \frac{\alpha}{\psi}$, since $\psi_l = \psi$ for all layers $l$. In particular, we take both linear and tanh multilayer perceptron (MLP) networks with $K = 7$ layers and width $N = 100$ on MNIST, with $\sigma = 1$ as the default, similar to Saxe et al. (2014). We initialize these networks with different $\gamma$, compute the connection sensitivity, prune the network, and then visualize layerwise the resulting sparsity patterns $\mathbf{c}$ as well as the corresponding connection sensitivity used for pruning in Figure 1.
64
+
65
+ It is seen in the sparsity patterns that for the tanh network, unlike the linear case, more parameters tend to be pruned in the later layers than the earlier layers. As a result, this limits the learning capability of the subnetwork critically when a high sparsity level is requested; e.g., for $\bar{\kappa} = 90\%$ , only a few parameters in later layers are retained after pruning. This is explained by the connection sensitivity plot. The sensitivity of parameters in the nonlinear network tends to decrease towards the later layers, and therefore, choosing the top- $\kappa$ parameters globally based on the sensitivity scores results in a subnetwork in which retained parameters are distributed highly non-uniformly and sparsely towards the end of the network. This result implies that the initial weights have a crucial effect on the connection sensitivity, and from there, the pruning results.
66
+
67
+ ![](images/2bd44a486af4c4a522d2676153835040a6683f21637587ccc242dda01ab6fdae.jpg)
68
+ Figure 1: (left) layerwise sparsity patterns $c \in \{0,1\}^{100 \times 100}$ obtained as a result of pruning for the sparsity level $\bar{\kappa} = \{10,..,90\} \%$ . Here, black(0)/white(1) pixels refer to pruned/retained parameters; (right) connection sensitivities (CS) measured for the parameters in each layer. All networks are initialized with $\gamma = 1.0$ . Unlike the linear case, the sparsity pattern for the tanh network is nonuniform over different layers. When pruning for a high sparsity level (e.g., $\bar{\kappa} = 90\%$ ), this becomes critical and leads to poor learning capability as there are only a few parameters left in later layers. This is explained by the connection sensitivity plot which shows that for the nonlinear network parameters in later layers have saturating, lower connection sensitivities than those in earlier layers.
69
+
70
+ ![](images/e9ed295c32281b06f5e6c38c6b8406867b89660d25e675de00c4bb0c878c59a6.jpg)
71
+
72
+ ![](images/e2575c247dc4c089f6eac7c5cabde9a215558fb67fea4f0dd34d1ea4524d7ced.jpg)
73
+
74
+ # 3.2 GRADIENT SIGNAL IN CONNECTION SENSITIVITY
75
+
76
+ We posit that the unreliability of connection sensitivity observed in Figure 1 is due to poor signal propagation: an initialization that causes the input signal to be strongly amplified or attenuated in the forward pass will saturate the error signal under backpropagation (i.e., the gradients), and hence will result in poorly calibrated connection sensitivity scores across layers, which will eventually lead to poor pruning results, potentially with complete disconnection of signal paths (e.g., an entire layer).
77
+
78
+ Precisely, we give the relationship between the connection sensitivity and the gradients as follows. From Equation 1, connection sensitivity is a normalized magnitude of the gradients with respect to the connectivity parameters $\mathbf{c}$. Here, we use the vectorized notation where $\mathbf{w}$ denotes all learnable parameters and $\mathbf{c}$ denotes the corresponding connectivity parameters. From the chain rule, we can write:
79
+
80
+ $$
81
+ \left. \frac {\partial L (\mathbf {c} \odot \mathbf {w} ; \mathcal {D})}{\partial \mathbf {c}} \right| _ {\mathbf {c} = 1} = \left. \frac {\partial L (\mathbf {c} \odot \mathbf {w} ; \mathcal {D})}{\partial (\mathbf {c} \odot \mathbf {w})} \right| _ {\mathbf {c} = 1} \odot \mathbf {w} = \frac {\partial L (\mathbf {w} ; \mathcal {D})}{\partial \mathbf {w}} \odot \mathbf {w}. \tag {3}
82
+ $$
83
+
84
+ Therefore, $\partial L / \partial \mathbf{c}$ is the gradient $\partial L / \partial \mathbf{w}$ amplified (or attenuated) by the corresponding weights $\mathbf{w}$, i.e., $\partial L / \partial c_{j} = (\partial L / \partial w_{j})\, w_{j}$ for all $j\in \{1\dots m\}$. Considering $\partial L / \partial c_{j}$ for a given $j$, since $w_{j}$ does not depend on any other layers or on signal propagation, the only term that depends on signal propagation in the network is the gradient term $\partial L / \partial w_{j}$. Hence, a necessary condition to ensure a faithful $\partial L / \partial \mathbf{c}$ (and connection sensitivity) is that the gradients $\partial L / \partial \mathbf{w}$ be faithful. In the following section, we formalize this from a signal propagation perspective, and characterize an initial condition that ensures reliable connection sensitivity measurement.
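+
+ The relation above is easy to verify numerically; the following is a small autograd check on a single linear layer with a squared-error loss (the layer size and loss are arbitrary choices for illustration):
+
+ ```python
+ import torch
+
+ # Check Equation 3 on a toy layer: dL/dc at c = 1 equals (dL/dw) * w elementwise.
+ torch.manual_seed(0)
+ w0 = torch.randn(5, 3)
+ x, y = torch.randn(3), torch.randn(5)
+
+ c = torch.ones_like(w0, requires_grad=True)
+ w = w0.clone().requires_grad_(True)
+ loss = (((c * w) @ x) - y).pow(2).sum()
+ loss.backward()
+
+ assert torch.allclose(c.grad, w.grad * w0)  # dL/dc = (dL/dw) * w
+ ```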
85
+
86
+ # 3.3 LAYERWISE DYNAMICAL ISOMETRY
87
+
88
+ # 3.3.1 GRADIENTS IN TERMS OF JACOBIANS
89
+
90
+ From the feed-forward dynamics of a network in Equation 2, the network's input-output Jacobian corresponding to a given input $\mathbf{x}^0$ can be written, by the chain rule of differentiation, as:
91
+
92
+ $$
93
+ \mathbf {J} ^ {0, K} = \frac {\partial \mathbf {x} ^ {K}}{\partial \mathbf {x} ^ {0}} = \prod_ {l = 1} ^ {K} \mathbf {D} ^ {l} \mathbf {W} ^ {l}, \tag {4}
94
+ $$
95
+
96
+ where $\mathbf{D}^l\in \mathbb{R}^{N\times N}$ is a diagonal matrix with entries $\mathbf{D}_{ij}^{l} = \phi^{\prime}(h_{i}^{l})\delta_{ij}$ , with $\phi^\prime$ denoting the derivative of nonlinearity $\phi$ , and $\delta_{ij} = \mathbb{1}[i = j]$ is the Kronecker delta. Here, we will use $\mathbf{J}^{k,l}$ to denote the Jacobian from layer $k$ to layer $l$ . Now, we give the relationship between gradients and Jacobians:
97
+
98
+ Proposition 1. Let $\epsilon = \partial L / \partial \mathbf{x}^K$ denote the error signal and $\mathbf{x}^0$ denote the input signal. Then,
99
+
100
+ 1. the gradients satisfy:
101
+
102
+ $$
103
+ \mathbf {g} _ {\mathbf {w} ^ {l}} ^ {T} = \epsilon \mathbf {J} ^ {l, K} \mathbf {D} ^ {l} \otimes \mathbf {x} ^ {l - 1}, \tag {5}
104
+ $$
105
+
106
+ where $\mathbf{J}^{l,K} = \partial \mathbf{x}^K /\partial \mathbf{x}^l$ is the Jacobian from layer $l$ to the output and $\otimes$ is the Kronecker product.
+
+ 2. additionally, for linear networks, i.e., when $\phi$ is the identity:
107
+
108
+ $$
109
+ \mathbf {g} _ {\mathbf {w} ^ {l}} ^ {T} = \epsilon \mathbf {J} ^ {l, K} \otimes \left(\mathbf {J} ^ {0, l - 1} \mathbf {x} ^ {0} + \mathbf {a}\right), \tag {6}
110
+ $$
111
+
112
+ where $\mathbf{J}^{0,l - 1} = \partial \mathbf{x}^{l - 1} / \partial \mathbf{x}^0$ is the Jacobian from the input to layer $l - 1$ and $\mathbf{a}\in \mathbb{R}^N$ is a constant term that does not depend on $\mathbf{x}^0$ .
113
+
114
+ Proof. This can be proved by an algebraic manipulation of the chain rule while using the feedforward dynamics in Equation 2. We provide the full derivation in Appendix A. $\square$
115
+
116
+ Notice that the gradient at layer $l$ constitutes both the backward propagation of the error signal $\epsilon$ up to layer $l$ and the forward propagation of the input signal $\mathbf{x}^0$ up to layer $l - 1$ . Moreover, especially in the linear case, the signal propagation in both directions is governed by the corresponding Jacobians. We believe that this interpretation of gradients is useful as it sheds light on how signal propagation affects the gradients. To this end, we next analyze the conditions on the Jacobians, which would guarantee faithful signal propagation in the network, and consequently, faithful gradients.
117
+
118
+ # 3.3.2 ENSURING FAITHFUL GRADIENTS
119
+
120
+ Here, we first consider the layerwise signal propagation which would be useful to derive properties on the initialization to ensure faithful gradients. To this end, let us consider the layerwise Jacobian:
121
+
122
+ $$
123
+ \mathbf {J} ^ {l - 1, l} = \frac {\partial \mathbf {x} ^ {l}}{\partial \mathbf {x} ^ {l - 1}} = \mathbf {D} ^ {l} \mathbf {W} ^ {l}. \tag {7}
124
+ $$
125
+
126
+ Note that it is sufficient to have layerwise dynamical isometry in order to ensure faithful signal propagation in the network.
127
+
128
+ Definition 1. (Layerwise dynamical isometry) Let $\mathbf{J}^{l - 1,l} = \frac{\partial\mathbf{x}^l}{\partial\mathbf{x}^{l - 1}}\in \mathbb{R}^{N_l\times N_{l - 1}}$ be the Jacobian matrix of layer $l$ . The network is said to satisfy layerwise dynamical isometry if the singular values of $\mathbf{J}^{l - 1,l}$ are concentrated near 1 for all layers, i.e., for a given $\epsilon >0$ , the singular value $\sigma_{j}$ satisfies $|1 - \sigma_{j}|\leq \epsilon$ for all $j$ .
129
+
130
+ This would guarantee that the signal from layer $l$ to $l - 1$ (or vice versa) is propagated without amplification or attenuation in any of its dimensions. From Proposition 1 and Equation 7, by induction, it is easy to show that if the layerwise signal propagation is faithful, the error and input signals will faithfully propagate throughout the network, resulting in faithful gradients.
131
+
132
+ For linear networks, $\mathbf{J}^{l-1,l} = \mathbf{W}^l$. Therefore, one can initialize the weight matrix to be orthogonal such that $(\mathbf{W}^l)^T\mathbf{W}^l = \mathbf{I}$, where $\mathbf{I}$ is the identity matrix of dimension $N$. In this case, all singular values of $\mathbf{W}^l$ are exactly 1 (i.e., exact dynamical isometry), and such an initialization guarantees faithful gradients. While a linear network is of little practical use, we note that it helps to develop theoretical analysis and provides intuition as to why dynamical isometry is a useful measure.
133
+
134
+ For nonlinear networks, the diagonal matrix $\mathbf{D}^l$ needs to be accounted for as it depends on the pre-activations $\mathbf{h}^l$ at layer $l$. In this case, it is important to have the pre-activations $\mathbf{h}^l$ fall into the linear region of the nonlinear function $\phi$. Precisely, mean field theory assumes that in the large-$N$ limit, the empirical distribution of the pre-activations $\mathbf{h}^l$ converges to a Gaussian with zero mean and variance $q^l$, where the variance follows a recursion relation (Poole et al., 2016). Therefore, to achieve layerwise dynamical isometry, the idea is to find a fixed point $q^*$ such that $\mathbf{h}^l \sim \mathcal{N}(0, q^*)$ for all $l \in \{1 \dots K\}$. Such a fixed point makes $\mathbf{D}^l = \mathbf{D}$ for all layers, and therefore, the pre-activations are placed in the linear region of the nonlinearity. Then, given the nonlinearity, one can find a rescaling such that $(\mathbf{D}\mathbf{W}^l)^T (\mathbf{D}\mathbf{W}^l) = (\mathbf{W}^l)^T\mathbf{W}^l / \sigma_w^2 = \mathbf{I}$. The procedure for finding the rescaling $\sigma_w^2$ for various nonlinearities is discussed in Pennington et al. (2017; 2018). Also, this easily extends to convolutional neural networks using the initialization method in Xiao et al. (2018).
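+
+ For intuition, the following sketch builds a tanh MLP with orthogonally initialized weights and checks the singular values of each layerwise Jacobian $\mathbf{D}^l \mathbf{W}^l$ from Equation 7; the small input scale stands in for the mean-field rescaling that keeps pre-activations in the linear region, so the numbers are only indicative of the condition in Definition 1.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def layerwise_jacobian_svals(model, x):
+     """Singular values of each layerwise Jacobian D^l W^l (Eq. 7) for a tanh MLP,
+     evaluated along the forward pass of the input x."""
+     svals = []
+     for layer in model:
+         if isinstance(layer, nn.Linear):
+             h = layer(x)                                  # pre-activation h^l
+             D = torch.diag(1.0 - torch.tanh(h) ** 2)      # phi'(h) for phi = tanh
+             svals.append(torch.linalg.svdvals(D @ layer.weight))
+             x = torch.tanh(h)                             # post-activation x^l
+     return svals
+
+ layers = []
+ for _ in range(7):
+     lin = nn.Linear(100, 100, bias=False)
+     nn.init.orthogonal_(lin.weight)    # exact isometry for the linear part
+     layers += [lin, nn.Tanh()]
+ model = nn.Sequential(*layers)
+
+ with torch.no_grad():
+     svals = layerwise_jacobian_svals(model, 0.1 * torch.randn(100))
+     # With orthogonal W^l and pre-activations in the linear region of tanh,
+     # the singular values of every D^l W^l concentrate near 1.
+ ```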
135
+
136
+ Table 1: Jacobian singular values and resulting sparse networks for the 7-layer tanh MLP network considered in Section 3.1. SG, CN, and Sparsity refer to Scaled Gaussian, Condition Number (i.e., $s_{\mathrm{max}} / s_{\mathrm{min}}$, where $s_{\mathrm{max}}$ and $s_{\mathrm{min}}$ are the maximum and minimum Jacobian singular values), and the ratio of pruned parameters to the total number of parameters, respectively. SG ($\gamma = 10^{-2}$) is equivalent to the variance scaling initialization as in LeCun et al. (1998); Glorot & Bengio (2010). The failure cases correspond to unreliable connection sensitivity resulting from poorly conditioned initial Jacobians.
137
+
138
+ <table><tr><td rowspan="2">Initialization</td><td colspan="3">Jacobian singular values</td><td colspan="8">Sparsity in pruned network (across layers)</td><td rowspan="2">Error</td></tr><tr><td>Mean</td><td>Std</td><td>CN</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td></td></tr><tr><td>SG (γ=10-4)</td><td>2.46e-07</td><td>9.90e-08</td><td>4.66e+00</td><td>0.97</td><td>0.80</td><td>0.80</td><td>0.80</td><td>0.80</td><td>0.81</td><td>0.48</td><td>2.66</td><td></td></tr><tr><td>SG (γ=10-3)</td><td>5.74e-04</td><td>2.45e-04</td><td>8.54e+00</td><td>0.97</td><td>0.80</td><td>0.80</td><td>0.80</td><td>0.80</td><td>0.81</td><td>0.48</td><td>2.67</td><td></td></tr><tr><td>SG (γ=10-2)</td><td>4.49e-01</td><td>2.51e-01</td><td>5.14e+01</td><td>0.96</td><td>0.80</td><td>0.80</td><td>0.80</td><td>0.81</td><td>0.81</td><td>0.49</td><td>2.67</td><td></td></tr><tr><td>SG (γ=10-1)</td><td>2.30e+01</td><td>2.56e+01</td><td>2.92e+04</td><td>0.96</td><td>0.81</td><td>0.82</td><td>0.82</td><td>0.82</td><td>0.80</td><td>0.45</td><td>2.61</td><td></td></tr><tr><td>SG (γ=100)</td><td>1.03e+03</td><td>2.61e+03</td><td>3.34e+11</td><td>0.85</td><td>0.88</td><td>0.99</td><td>1.00</td><td>1.00</td><td>1.00</td><td>0.91</td><td>90.2</td><td></td></tr><tr><td>SG (γ=101)</td><td>3.67e+04</td><td>2.64e+05</td><td>inf</td><td>0.84</td><td>0.95</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>90.2</td><td></td></tr></table>
139
+
140
+ We note that dynamical isometry is in fact a weaker condition than layerwise dynamical isometry. However, in practice, the initialization suggested in existing works (Pennington et al., 2017; Xiao et al., 2018), i.e., orthogonal initialization for the weight matrices in each layer with rescaling based on mean field theory, satisfies layerwise dynamical isometry, even though this term was not mentioned.
141
+
142
+ Now, recall from Section 3.1 that a network is pruned with a global threshold based on connection sensitivity, and from Section 3.2 that the connection sensitivity is the gradients scaled by the weights. This in turn implies that the connection sensitivity scores across layers are required to be of the same scale. To this end, we require the gradients to be faithful and the weights to be in the same scale for all the layers. Notice, this condition is trivially satisfied when the layerwise dynamical isometry is ensured, as each layer is initialized identically (i.e., orthogonal initialization) and the gradients are guaranteed to be faithful.
143
+
144
+ Finally, we verify the failure of pruning cases presented in Section 3.1 based on the signal propagation perspective. Specifically, we measure the singular value distribution of the input-output Jacobian $(\mathbf{J}^{0,K})$ for the 7-layer tanh MLP network, and the results are reported in Table 1. Note that while connection sensitivity based pruning is robust to moderate changes in the Jacobian singular values, it fails catastrophically when the condition number of the Jacobian is very large ($>10^{11}$). In fact, these failure cases correspond to completely disconnected networks, a consequence of pruning with unreliable connection sensitivity resulting from poorly conditioned initial Jacobians. As we will show subsequently, these findings extend to modern architectures, and layerwise dynamical isometry yields well-conditioned Jacobians and in turn the best pruning results.
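+
+ The kind of measurement reported in Table 1 can be reproduced in a few lines by building the product in Equation 4 explicitly; the sketch below uses a scaled Gaussian whose variance is $\gamma$ and omits biases, both simplifying assumptions for illustration.
+
+ ```python
+ import torch
+
+ def io_jacobian_stats(widths, gamma, seed=0):
+     """Singular value statistics (mean, std, condition number) of the
+     input-output Jacobian J^{0,K} = prod_l D^l W^l (Eq. 4) of a tanh MLP
+     with scaled-Gaussian weights, evaluated at one random input."""
+     g = torch.Generator().manual_seed(seed)
+     x = torch.randn(widths[0], generator=g)
+     J = torch.eye(widths[0])
+     for n_in, n_out in zip(widths[:-1], widths[1:]):
+         W = torch.randn(n_out, n_in, generator=g) * gamma ** 0.5  # Var(W_ij) = gamma
+         h = W @ x                                                 # pre-activation (no bias)
+         D = torch.diag(1.0 - torch.tanh(h) ** 2)                  # phi'(h) for tanh
+         J = D @ W @ J
+         x = torch.tanh(h)
+     s = torch.linalg.svdvals(J)
+     return s.mean().item(), s.std().item(), (s.max() / s.min()).item()
+
+ for gamma in (1e-4, 1e-2, 1.0):   # sweep the scale, as in Table 1
+     print(gamma, io_jacobian_stats([100] * 8, gamma))
+ ```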
145
+
146
+ # 4 SIGNAL PROPAGATION IN SPARSE NEURAL NETWORKS
147
+
148
+ So far, we have shown empirically and theoretically that layerwise dynamical isometry can improve the process of pruning at initialization. One remaining question to address is the following: how well do signals propagate in the pruned sparse networks? In this section, we first examine the effect of sparsity on signal propagation after pruning. We find that pruning can indeed break dynamical isometry, degrading the trainability of sparse networks. We then present a simple but effective data-free method to recover approximate dynamical isometry on sparse networks.
149
+
150
+ Setup. The overall process is summarized as follows: Step 1. Initialize a network with either a variance scaling (VS) initialization or an orthogonal initialization satisfying layerwise dynamical isometry (LDI). Step 2. Prune at initialization for a sparsity level $\bar{\kappa}$ based on connection sensitivity (CS); we also test random (Rand) and magnitude (Mag) based pruning for comparison. Step 3. (optional) Enforce approximate dynamical isometry, if specified. Step 4. Train the pruned sparse network using SGD. We measure signal propagation (e.g., Jacobian singular values) on the sparse network right before Step 4, and observe training behavior during Step 4. Different methods are named as {A}-{B}-{C}, where A, B, C stand for the initialization scheme, the pruning method, and the (optional) approximate isometry, respectively. We perform this on 7-layer linear and tanh MLP networks as before<sup>3</sup>.
151
+
152
+ ![](images/1a88362cfba48921f1628dcea4a321ab40a26ee5a14231ee9135156dc20d52b9.jpg)
153
+ (a) Signal propagation
154
+
155
+ ![](images/0d44134d62b4fefd0deda2e9cc93223bbbe65787857e956e595f527e2cc22844.jpg)
156
+ Figure 2: (a) Signal propagation (mean Jacobian singular values) in sparse networks pruned for varying sparsity levels $\bar{\kappa}$ , and (b) training behavior of the sparse network at $\bar{\kappa} = 90\%$ . Signal propagation, pruning scheme, and overparameterization affect trainability of sparse neural networks. We train using SGD with the initial learning rate of 0.1 decayed by 1/10 at every 20k iterations. All results are the average over 10 runs. We provide other singular value statistics (max, min, std), accuracy plot, and extended training results for random and magnitude pruning in Appendix C.
157
+
158
+ ![](images/8904355e48b4ea260fce72b24c4eba558a6c8de2d4f4e09b6ce32d7094a0963e.jpg)
159
+ (b) Training behavior
160
+
161
+ ![](images/c2f79b91a7e48bddc1a9efea659f3faeaea775520a31e78fad736e5b6f02d2c4.jpg)
162
+
163
+ Effect of pruning on signal propagation and trainability. Let us first check the signal propagation measurements in the pruned networks (see Figure 2a). In general, the Jacobian singular values decrease continuously as the sparsity level increases (except for $\{\cdot\}$-$\{\cdot\}$-AI, which we will explain later), indicating that the more parameters are removed, the less faithful a network is likely to be with regard to propagating signals. Also, notice that the singular values drop more rapidly with random pruning compared to connection sensitivity based pruning methods (i.e., $\{\cdot\}$-Rand vs. $\{\cdot\}$-CS). This means that pruning using connection sensitivity is more robust to the destruction of dynamical isometry and preserves better signal propagation in the sparse network than random pruning. We further note that, albeit marginally, layerwise dynamical isometry allows better signal propagation than variance scaling initialization, with relatively higher mean singular values and much lower standard deviations, especially in the low sparsity regime (see Appendix C).
164
+
165
+ Now, we look into the relation between signal propagation and trainability of the sparse networks. Figure 2b shows the training behavior of the pruned networks $(\bar{\kappa} = 90\%)$ obtained by different methods. We can see a clear correlation between the signal propagation capability of a network and its training performance; i.e., the better a network propagates signals, the faster it converges during training. For instance, compare the trainability of a network before and after pruning. Compared to LDI-Dense $(\bar{\kappa} = 0)$, LDI-$\{\mathrm{CS},\mathrm{Mag},\mathrm{Rand}\}$ decrease the loss much more slowly; random pruning starts to decrease the loss around 4k iterations, and finally reaches close to zero loss around 10k iterations (see Appendix C), which is more than an order of magnitude slower than a network pruned by connection sensitivity. Recall that the pruned networks have much smaller singular values.
166
+
167
+ Enforcing approximate dynamical isometry. The observation above indicates that the better signal propagation is ensured on sparse networks, the better their training performs. This motivates us to think of the following: what if we can repair the broken isometry, before we start training the pruned network, such that we can achieve trainability comparable to that of the dense network? Precisely, we consider the following:
168
+
169
+ $$
170
+ \min _ {\mathbf {W} ^ {l}} \left\| \left(\mathbf {C} ^ {l} \odot \mathbf {W} ^ {l}\right) ^ {T} \left(\mathbf {C} ^ {l} \odot \mathbf {W} ^ {l}\right) - \mathbf {I} ^ {l} \right\| _ {F}, \tag {8}
171
+ $$
172
+
173
+ where $\mathbf{C}^l$, $\mathbf{W}^l$, and $\mathbf{I}^l$ are the sparse mask obtained by pruning, the corresponding weights, and the identity matrix at layer $l$, respectively, and $\| \cdot \|_F$ is the Frobenius norm. We optimize this for all layers identically using gradient descent. Given the sparsity topology $\mathbf{C}^l$ and the initial weights $\mathbf{W}^l$, this data-free method attempts to find an optimal $\mathbf{W}^*$ such that the combination of the sparse topology and the weights is layerwise orthogonal, potentially to the full rank capacity. This simple method (i.e., $\{\cdot\}$-$\{\cdot\}$-AI, where AI stands for Approximate Isometry) turns out to be highly effective. The results are provided in Figure 2, and we summarize our key findings below:
174
+
175
+ - Signal propagation (LDI-{CS, Rand} vs. LDI-{CS, Rand}-AI). The decreased singular values (by pruning $\bar{\kappa} > 0$) bounce up dramatically and become close to the level before pruning. This means that orthogonality enforced by Equation 8 is achieved in the sparse topology of the pruned network (i.e., approximate dynamical isometry), and therefore, signal propagation on the sparse network is likely to behave similarly to the dense network. As expected, the training performance increased significantly (e.g., compare LDI-CS with LDI-CS-AI for trainability). This works more dramatically for random pruning; i.e., even for randomly pruned sparse networks, training speed increases significantly, implying the benefit of ensuring signal propagation.
+ - Structure (LDI-Rand-AI vs. LDI-CS-AI). Even if the approximate dynamical isometry is enforced identically, the network pruned using connection sensitivity shows better trainability than the randomly pruned network. This potentially means that the sparse topology obtained by different pruning methods also matters, in addition to signal propagation characteristics.
+ - Overparameterization (LDI-Dense vs. LDI-{CS, Rand}-AI). Even though the singular values are restored to a level close to before pruning with approximate isometry, the non-pruned dense network converges faster than pruned networks. We hypothesize that in addition to signal propagation, overparameterization helps in optimization taking less time to find a minimum.
176
+
177
+ Table 2: Pruning results for various neural networks on different datasets. All networks are pruned at initialization for the sparsity $\bar{\kappa} = 90\%$ based on connection sensitivity scores as in Lee et al. (2019). We report orthogonality scores (OS) and generalization errors (Error) on CIFAR-10 (VGG16, ResNets) and Tiny-ImageNet (WRN16); all results are the average over 5 runs. The first and second best results are highlighted in each column of errors. The orthogonal initialization with enforced approximate isometry method (i.e., LDI-AI) achieves the best results across all tested architectures.
178
+
179
+ <table><tr><td rowspan="2">Initialization</td><td colspan="2">VGG16</td><td colspan="2">ResNet32</td><td colspan="2">ResNet56</td><td colspan="2">ResNet110</td><td colspan="2">WRN16</td></tr><tr><td>OS</td><td>Error</td><td>OS</td><td>Error</td><td>OS</td><td>Error</td><td>OS</td><td>Error</td><td>OS</td><td>Error</td></tr><tr><td>VS-L</td><td>13.72</td><td>8.16</td><td>4.50</td><td>11.96</td><td>4.64</td><td>10.43</td><td>4.65</td><td>9.13</td><td>11.99</td><td>45.08</td></tr><tr><td>VS-G</td><td>13.60</td><td>8.18</td><td>4.55</td><td>11.89</td><td>4.67</td><td>10.60</td><td>4.67</td><td>9.17</td><td>11.50</td><td>44.56</td></tr><tr><td>VS-H</td><td>15.44</td><td>8.36</td><td>4.41</td><td>12.21</td><td>4.44</td><td>10.63</td><td>4.39</td><td>9.08</td><td>13.49</td><td>46.62</td></tr><tr><td>LDI</td><td>13.33</td><td>8.11</td><td>4.43</td><td>11.55</td><td>4.51</td><td>10.08</td><td>4.57</td><td>8.88</td><td>11.28</td><td>44.20</td></tr><tr><td>LDI-AI</td><td>6.43</td><td>7.99</td><td>2.62</td><td>11.47</td><td>2.79</td><td>9.85</td><td>2.92</td><td>8.78</td><td>6.62</td><td>44.12</td></tr></table>
180
+
181
+ Table 3: Pruning results for VGG16 and ResNet32 with different activation functions on CIFAR-10. We report generalization errors (avg. over 5 runs), and the first and second best results are highlighted.
182
+
183
+ <table><tr><td rowspan="2">Initialization</td><td colspan="3">VGG16</td><td colspan="3">ResNet32</td></tr><tr><td>tanh</td><td>l-relu</td><td>selu</td><td>tanh</td><td>l-relu</td><td>selu</td></tr><tr><td>VS-L</td><td>9.07</td><td>7.78</td><td>8.70</td><td>13.41</td><td>12.04</td><td>12.26</td></tr><tr><td>VS-G</td><td>9.06</td><td>7.84</td><td>8.82</td><td>13.44</td><td>12.02</td><td>12.32</td></tr><tr><td>VS-H</td><td>9.99</td><td>8.43</td><td>9.09</td><td>13.12</td><td>11.66</td><td>12.21</td></tr><tr><td>LDI</td><td>8.76</td><td>7.53</td><td>8.21</td><td>13.22</td><td>11.58</td><td>11.98</td></tr><tr><td>LDI-AI</td><td>8.72</td><td>7.47</td><td>8.20</td><td>13.14</td><td>11.51</td><td>11.68</td></tr></table>
184
+
185
+ Table 4: Unsupervised pruning results for $K$ -layer MLP networks on MNIST. All networks are pruned for sparsity $\bar{\kappa} = 90\%$ at orthogonal initialization. We report generalization errors (avg. over 10 runs).
186
+
187
+ <table><tr><td>Loss</td><td>Superv.</td><td>K=3</td><td>K=5</td><td>K=7</td></tr><tr><td>GT</td><td>✓</td><td>2.46</td><td>2.43</td><td>2.61</td></tr><tr><td>Pred. (raw)</td><td>✗</td><td>3.31</td><td>3.38</td><td>3.60</td></tr><tr><td>Pred. (softmax)</td><td>✗</td><td>3.11</td><td>3.37</td><td>3.56</td></tr><tr><td>Unif.</td><td>✗</td><td>2.77</td><td>2.77</td><td>2.94</td></tr></table>
188
+
193
+
194
+ While being simple and data-free (thus fast), our signal propagation perspective can not only be used to improve the trainability of sparse neural networks, but also to complement a common explanation for the decreased trainability of compressed networks, which is often attributed merely to reduced capacity. Our results also extend to the case of convolutional neural networks (see Figure 8 in Appendix C).
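+
+ For reference, a minimal sketch of the enforcement step in Equation 8 for a single layer is given below; the optimizer, step count, learning rate, and the random mask used in the toy usage are illustrative choices, not the exact settings of our experiments.
+
+ ```python
+ import torch
+
+ def enforce_approximate_isometry(W, C, steps=2000, lr=0.1):
+     """Data-free sketch of Eq. 8: given a fixed binary mask C from pruning and
+     initial weights W, adjust W so that the masked layer C * W is approximately
+     orthogonal; only the surviving (unmasked) weights receive gradients."""
+     W = W.clone().requires_grad_(True)
+     I = torch.eye(W.shape[1])
+     opt = torch.optim.SGD([W], lr=lr)
+     for _ in range(steps):
+         opt.zero_grad()
+         M = C * W                               # masked (sparse) weight matrix
+         loss = torch.linalg.norm(M.T @ M - I)   # Frobenius norm of the residual
+         loss.backward()
+         opt.step()
+     return (C * W).detach()
+
+ # Toy usage: a 100x100 layer pruned to 90% sparsity with a random mask.
+ torch.manual_seed(0)
+ W0 = torch.nn.init.orthogonal_(torch.empty(100, 100))
+ C = (torch.rand(100, 100) > 0.9).float()       # keep ~10% of the weights
+ W_ai = enforce_approximate_isometry(W0, C)
+ ```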
195
+
196
+ # 5 VALIDATION AND EXTENSIONS
197
+
198
+ In this section, we aim to demonstrate the efficacy of our signal propagation perspective on a wide variety of settings. We first evaluate the idea of employing layerwise dynamical isometry on various modern neural networks. In addition, we further study the role of supervision under the pruning at initialization regime, extending it to unsupervised pruning. Our results show that indeed, pruning can be approached from the signal propagation perspective at varying scale, bringing forth the notion of neural architecture sculpting. The experiment settings used to generate the presented results are detailed in Appendix B. The code can be found here: https://github.com/namhoonlee/spp-public.
199
+
200
+ # 5.1 EVALUATION ON VARIOUS NEURAL NETWORKS AND DATASETS
201
+
202
+ Here, we verify that our signal propagation perspective for pruning neural networks at initialization is indeed valid, by evaluating further on various modern neural networks and datasets. To this end, we provide orthogonality scores (OS) and generalization errors of the sparse networks obtained by different methods and show that layerwise dynamical isometry with enforced approximate isometry results in the best performance; here, we define OS as $\frac{1}{K}\sum_{l}\| (\mathbf{C}^{l}\odot \mathbf{W}^{l})^{T}(\mathbf{C}^{l}\odot \mathbf{W}^{l}) - \mathbf{I}^{l}\|_{F}$, where $K$ is the number of layers, which indicates how close the weight matrices in each layer of the pruned network are to being orthogonal. All results are the average of 5 runs, and we do not optimize anything specific for a particular case (see Appendix B for experiment settings). The results are presented in Table 2.
203
+
204
+ The best pruning results are achieved when the approximate dynamical isometry is enforced on the pruned sparse network (i.e., LDI-AI), across all tested architectures. Also, the second best results are achieved with the orthogonal initialization that satisfies layerwise dynamical isometry (i.e., LDI). Looking closely, it is evident that there exists a high correlation between the orthogonality scores and the performance of pruned networks; i.e., the network initialized to have the lowest orthogonality scores achieves the best generalization errors after training. Note that the orthogonality scores being close to 0, by definition, states how faithful a network will be with regard to letting signals propagate without being amplified or attenuated. Therefore, the fact that a pruned network with the lowest orthogonality scores tends to yield good generalization errors further validates that our signal propagation perspective is indeed effective for pruning at initialization. Moreover, we test for other nonlinear activation functions (tanh, leaky-relu, selu), and found that the orthogonal initialization consistently outperforms variance scaling methods (see Table 3).
205
+
206
+ # 5.2 PRUNING WITHOUT SUPERVISION
207
+
208
+ So far, we have shown that pruning random networks can be approached from a signal propagation perspective by ensuring faithful connection sensitivity. Another factor that constitutes connection sensitivity is the loss term. At a glance, it is not obvious how informative the supervised loss measured on a random network will be for connection sensitivity. In this section, we look into the effect of supervision, by simply replacing the loss computed using ground-truth labels with different unsupervised surrogate losses as follows: replacing the target distribution using ground-truth labels with uniform distribution (Unif.), and using the averaged output prediction of the network (Pred.; softmax/raw). The results for MLP networks are in Table 4. Even though unsupervised pruning results are not as good as the supervised case, the results are still interesting, especially for the uniform case, in that there was no supervision given. We thus experiment further for the uniform case on other networks, and obtain the following results: 8.25, 11.69, 11.01, 8.82 errors (\%) for VGG16, ResNet32, ResNet56, ResNet110, respectively. Surprisingly, the results are often competitive to that of pruning with supervision (i.e., compare to LDI results in Table 2). Notably, previous pruning algorithms assume the existence of supervision a priori. Being the first demonstration, along with the signal propagation perspective, this unsupervised pruning strategy can be useful in scenarios where there are no labels or only weak supervision is available.
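+
+ As a rough illustration of the uniform-target surrogate, the supervised loss used for computing connection sensitivity can be swapped for the cross-entropy between the network's predictive distribution and a uniform distribution over classes; whether this exactly matches our Unif. variant is an assumption, and the helper below is a hypothetical drop-in for the loss in the earlier sensitivity sketch.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def uniform_surrogate_loss(logits):
+     """Cross-entropy H(u, p) between a uniform target u and the predicted
+     distribution p: -(1/C) * sum_c log p_c, averaged over the batch."""
+     log_probs = F.log_softmax(logits, dim=-1)
+     return -log_probs.mean(dim=-1).mean()
+
+ # Label-free usage: loss = uniform_surrogate_loss(model(x)) in place of
+ # F.cross_entropy(model(x), y) when computing the sensitivity scores.
+ ```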
209
+
210
+ To demonstrate further, we also conducted transfer of sparsity experiments such as transferring a pruned network from one task to another (MNIST $\leftrightarrow$ Fashion-MNIST). Table 5 shows that, while pruning results may degrade if sparsity is transferred, or done without supervision, less impact is caused for unsupervised pruning when transferred to a different task (i.e., 0.52 to 0.14 on MNIST, and 1.11 to -0.78 on F-MNIST).
211
+
212
+ Table 5: Transfer of sparsity experiment results for LeNet. We prune for $\bar{\kappa} = 97\%$ at orthogonal initialization, and report gen. errors (average over 10 runs).
213
+
214
+ <table><tr><td rowspan="2">Category</td><td colspan="2">Dataset</td><td colspan="2">Error</td><td rowspan="2">Error rand</td></tr><tr><td>prune</td><td>train&amp;test</td><td>sup. → unsup.</td><td>(Δ)</td></tr><tr><td>Standard</td><td>MNIST</td><td>MNIST</td><td>2.42 → 2.94</td><td>+0.52</td><td>15.56</td></tr><tr><td>Transfer</td><td>F-MNIST</td><td>MNIST</td><td>2.66 → 2.80</td><td>+0.14</td><td>18.03</td></tr><tr><td>Standard</td><td>F-MNIST</td><td>F-MNIST</td><td>11.90 → 13.01</td><td>+1.11</td><td>24.72</td></tr><tr><td>Transfer</td><td>MNIST</td><td>F-MNIST</td><td>14.17 → 13.39</td><td>-0.78</td><td>24.89</td></tr></table>
215
+
216
+ This indicates that inductive bias exists in data, affecting transfer and unsupervised pruning, and potentially, that a "universal" sparse topology might be obtainable if a universal data distribution is known (e.g., an extremely large dataset in practice). This may help in situations where different tasks from an unknown data distribution are to be performed (e.g., continual learning). We also tested two other unsupervised losses, but none performed as well as the uniform loss (e.g., Jacobian norms $\| J\| _1$: 5.03, $\| J\| _2$: 3.00 vs. Unif.: 2.94), implying that if pruning is to be unsupervised, the uniform loss is the better choice, because other unsupervised losses depend on the input data (and thus can suffer from inductive bias). Random pruning degrades significantly at high sparsity for all cases.
217
+
218
+ # 5.3 NEURAL ARCHITECTURE SCULPTING
219
+
220
+ We have shown that pruning at initialization, even when no supervision is provided, can be effective based on the signal propagation perspective. This begs the question of whether pruning needs to be limited to pre-shaped architectures or not. In other words, what if pruning is applied to an arbitrarily bulky network and is treated as sculpting an architecture? In order to find out, we conduct the following experiments: we take a popular pre-designed architecture (ResNet20 in He et al. (2016)) as a base network, and consider a range of variants that are originally bigger than the base model, but pruned to have the same number of parameters as the base dense network. Specifically, we consider the following equivalents: (1) the same number of residual blocks, but with larger widths; (2) a reduced number of residual blocks with larger widths; (3) a larger residual block and the same width (see Table 6 in Appendix B for details). The results are presented in Figure 3.
221
+
222
+ Overall, the sparse equivalents record lower errors than the dense base model. Notice that some models are extremely sparse (e.g., Equivalent 1 pruned for $\bar{\kappa} = 98.4\%$). While all networks have the same number of parameters, the discovered sparse equivalents outperform the dense reference network. This result is well aligned with recent findings in Kalchbrenner et al. (2018): large sparse networks can outperform their small dense counterparts, while enjoying increased computational and memory efficiency via a dedicated implementation for sparsity in practice. Also, it seems that pruning wider networks tends to be more effective in producing a better model than pruning deeper ones (e.g., Equivalent 1 vs. Equivalent 3). We further note that unlike existing prior works, the sparse networks are discovered by sculpting an arbitrarily-designed architecture, without pretraining or supervision.
223
+
224
+ ![](images/5cb713bef823e0cde1435dcfedf9b2038e1b85c58a46bcda4d0c412596e6645f.jpg)
225
+ Figure 3: Neural architecture sculpting results on CIFAR-10. We report generalization errors (avg. over 5 runs). All networks have the same number of parameters (269k) and trained identically.
226
+
227
+ # 6 DISCUSSION AND FUTURE WORK
228
+
229
+ In this work, we have approached the problem of pruning neural networks at initialization from a signal propagation perspective. Based on observing the effect of varying the initialization, we found that initial weights have a critical impact on connection sensitivity measurements and hence on pruning results. This led us to conduct a theoretical analysis based on dynamical isometry and mean field theory, and to formally characterize a sufficient condition to ensure faithful signal propagation in a given network. Moreover, our analysis of compressed neural networks revealed that the signal propagation characteristics of a sparse network highly correlate with its trainability, and also that pruning can break the dynamical isometry ensured on a network at initialization, resulting in degraded trainability of the compressed network. To address this, we introduced a simple, yet effective data-free method to recover the orthogonality and enhance the trainability of the compressed network. Finally, throughout a range of validation and extension experiments, we verified that our signal propagation perspective is effective for understanding, improving, and extending the task of pruning at initialization across various settings. We believe that our results on the increased trainability of compressed networks can take us one step towards finding the "winning lottery ticket" (i.e., a set of initial weights that, given a sparse topology, can quickly reach a generalization performance comparable to that of the uncompressed network once trained) suggested in Frankle & Carbin (2019).
230
+
231
+ We point out, however, that there remain several aspects to consider. While pruning on enforced isometry produces trainable sparse networks, the two-stage orthogonalization process (i.e., prune first and enforce the orthogonality later) can be suboptimal, especially at a high sparsity level. Also, network weights change during training, which can affect signal propagation characteristics, and therefore, dynamical isometry may not continue to hold over the course of training. We hypothesize that a potential key to successful neural network compression is to address the complex interplay between optimization and signal propagation, and it might be immensely beneficial if optimization naturally takes place in the space of isometry. We believe that our signal propagation perspective provides a means to formulate this as an optimization problem by maximizing the trainability of sparse networks while pruning, and we intend to explore this direction as future work.
232
+
233
+ # ACKNOWLEDGMENTS
234
+
235
+ This work was supported by the ERC grant ERC-2012-AdG 321162-HELIOS, EPSRC grant See-bibyte EP/M013774/1, EPSRC/MURI grant EP/N019474/1 and the Australian Research Council Centre of Excellence for Robotic Vision (project number CE140100016). We would also like to acknowledge the Royal Academy of Engineering and FiveAI, and thank Richard Hartley, Puneet Dokania and Amartya Sanyal for helpful discussions.
236
+
237
+ # REFERENCES
238
+
239
+ Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Finding sparse, trainable neural networks. ICLR, 2019.
240
+ Xavier Glorot and Yoshua Bengio. Understanding the difficulty of training deep feedforward neural networks. AISTATS, 2010.
241
+ Song Han, Huizi Mao, and William J Dally. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. *ICLR*, 2016.
242
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Delving deep into rectifiers: Surpassing human-level performance on imagenet classification. ICCV, 2015.
243
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CVPR, 2016.
244
+ Geoffrey E Hinton and Ruslan R Salakhutdinov. Reducing the dimensionality of data with neural networks. Science, 2006.
245
+ Nal Kalchbrenner, Erich Elsen, Karen Simonyan, Seb Noury, Norman Casagrande, Edward Lockhart, Florian Stimberg, Aaron van den Oord, Sander Dieleman, and Koray Kavukcuoglu. Efficient neural audio synthesis. ICML, 2018.
246
+ Yann LeCun, Léon Bottou, Genevieve B. Orr, and Klaus-Robert Müller. Efficient backprop. Neural Networks: Tricks of the Trade, 1998.
247
+ Yann LeCun, Joshua Bengio, and Geoffrey Hinton. Deep learning. Nature, 2015.
248
+ Namhoon Lee, Thalaiyasingam Ajanthan, and Philip HS Torr. SNIP: Single-shot network pruning based on connection sensitivity. ICLR, 2019.
249
+ Razvan Pascanu, Tomas Mikolov, and Yoshua Bengio. On the difficulty of training recurrent neural networks. ICML, 2013.
250
+ Jeffrey Pennington, Samuel Schoenholz, and Surya Ganguli. Resurrecting the sigmoid in deep learning through dynamical isometry: theory and practice. NeurIPS, 2017.
251
+ Jeffrey Pennington, Samuel S Schoenholz, and Surya Ganguli. The emergence of spectral universality in deep networks. AISTATS, 2018.
252
+ Ben Poole, Subhaneil Lahiri, Maithra Raghu, Jascha Sohl-Dickstein, and Surya Ganguli. Exponential expressivity in deep neural networks through transient chaos. NeurIPS, 2016.
253
+ Russell Reed. Pruning algorithms-a survey. Neural Networks, 1993.
254
+ Andrew M Saxe, James L McClelland, and Surya Ganguli. Exact solutions to the nonlinear dynamics of learning in deep linear neural networks. ICLR, 2014.
255
+ Samuel S Schoenholz, Justin Gilmer, Surya Ganguli, and Jascha Sohl-Dickstein. Deep information propagation. ICLR, 2017.
256
+ Wojciech Tarnowski, Piotr Warchol, Stanisław Jastrzebski, Jacek Tabor, and Maciej A Nowak. Dynamical isometry is achieved in residual networks in a universal way for any activation function. AISTATS, 2019.
257
+
258
+ Lechao Xiao, Yasaman Bahri, Jascha Sohl-Dickstein, Samuel S Schoenholz, and Jeffrey Pennington. Dynamical isometry and a mean field theory of cnns: How to train 10,000-layer vanilla convolutional neural networks. ICML, 2018.
259
+
260
+ Ge Yang and Samuel Schoenholz. Mean field residual networks: On the edge of chaos. NeurIPS, 2017.
261
+
262
+ Greg Yang, Jeffrey Pennington, Vinay Rao, Jascha Sohl-Dickstein, and Samuel S Schoenholz. A mean field theory of batch normalization. ICLR, 2019.
263
+
264
+ # A GRADIENTS IN TERMS OF JACOBIANS
265
+
266
+ Proposition 1. Let $\epsilon = \partial L / \partial \mathbf{x}^K$ denote the error signal and $\mathbf{x}^0$ denote the input signal. Then,
267
+
268
+ 1. the gradients satisfy:
269
+
270
+ $$
271
+ \mathbf {g} _ {\mathbf {w} ^ {l}} ^ {T} = \epsilon \mathbf {J} ^ {l, K} \mathbf {D} ^ {l} \otimes \mathbf {x} ^ {l - 1}, \tag {9}
272
+ $$
273
+
274
+ where $\mathbf{J}^{l,K} = \partial \mathbf{x}^K /\partial \mathbf{x}^l$ is the Jacobian from layer $l$ to the output and $\otimes$ is the Kronecker product.
+
+ 2. additionally, for linear networks, i.e., when $\phi$ is the identity:
275
+
276
+ $$
277
+ \mathbf{g}_{\mathbf{w}^{l}}^{T} = \epsilon \mathbf{J}^{l,K} \otimes \left(\mathbf{J}^{0,l-1} \mathbf{x}^{0} + \mathbf{a}\right), \tag{10}
278
+ $$
279
+
280
+ where $\mathbf{J}^{0,l - 1} = \partial \mathbf{x}^{l - 1} / \partial \mathbf{x}^0$ is the Jacobian from the input to layer $l - 1$ and $\mathbf{a}\in \mathbb{R}^N$ is a constant term that does not depend on $\mathbf{x}^0$.
281
+
282
+ Proof. The proof is based on a simple algebraic manipulation of the chain rule. The gradient of the loss with respect to the weight matrix $\mathbf{W}^l$ can be written as:
283
+
284
+ $$
285
+ \mathbf{g}_{\mathbf{w}^{l}} = \frac{\partial L}{\partial \mathbf{W}^{l}} = \frac{\partial L}{\partial \mathbf{x}^{K}} \frac{\partial \mathbf{x}^{K}}{\partial \mathbf{x}^{l}} \frac{\partial \mathbf{x}^{l}}{\partial \mathbf{W}^{l}}. \tag{11}
286
+ $$
287
+
288
+ Here, the gradient $\partial \mathbf{y} / \partial \mathbf{x}$ is represented as a matrix of dimension ($\mathbf{y}$-size) $\times$ ($\mathbf{x}$-size). For gradients with respect to matrices, their vectorized form is used. Notice that
289
+
290
+ $$
291
+ \frac{\partial \mathbf{x}^{l}}{\partial \mathbf{W}^{l}} = \frac{\partial \mathbf{x}^{l}}{\partial \mathbf{h}^{l}} \frac{\partial \mathbf{h}^{l}}{\partial \mathbf{W}^{l}} = \mathbf{D}^{l} \frac{\partial \mathbf{h}^{l}}{\partial \mathbf{W}^{l}}. \tag{12}
292
+ $$
293
+
294
+ Considering the feed-forward dynamics for a particular neuron $i$ ,
295
+
296
+ $$
297
+ h_{i}^{l} = \sum_{j} W_{ij}^{l} x_{j}^{l-1} + b_{i}^{l}, \tag{13}
298
+ $$
299
+
300
+ $$
301
+ \frac{\partial h_{i}^{l}}{\partial W_{ij}^{l}} = x_{j}^{l-1}.
302
+ $$
303
+
304
+ Therefore, using the Kronecker product, we can compactly write:
305
+
306
+ $$
307
+ \frac{\partial \mathbf{x}^{l}}{\partial \mathbf{W}^{l}} = (\mathbf{D}^{l})^{T} \otimes (\mathbf{x}^{l-1})^{T}. \tag{14}
308
+ $$
309
+
310
+ Now, Equation 11 can be written as:
311
+
312
+ $$
313
+ \mathbf{g}_{\mathbf{w}^{l}} = \left(\epsilon \mathbf{J}^{l,K} \mathbf{D}^{l}\right)^{T} \otimes \left(\mathbf{x}^{l-1}\right)^{T}, \tag{15}
314
+ $$
315
+
316
+ $$
317
+ \mathbf{g}_{\mathbf{w}^{l}}^{T} = \epsilon \mathbf{J}^{l,K} \mathbf{D}^{l} \otimes \mathbf{x}^{l-1}.
318
+ $$
319
+
320
+ Here, $\mathbf{A}^T\otimes \mathbf{B}^T = (\mathbf{A}\otimes \mathbf{B})^T$ is used. Moreover, for linear networks $\mathbf{D}^l = \mathbf{I}$ and $\mathbf{x}^l = \mathbf{h}^l$ for all $l\in \{1\dots K\}$ . Therefore, $\mathbf{x}^{l - 1}$ can be written as:
321
+
322
+ $$
323
+ \begin{aligned} \mathbf{x}^{l-1} &= \phi\left(\mathbf{W}^{l-1} \phi\left(\mathbf{W}^{l-2} \dots \phi\left(\mathbf{W}^{1} \mathbf{x}^{0} + \mathbf{b}^{1}\right) \dots + \mathbf{b}^{l-2}\right) + \mathbf{b}^{l-1}\right) \\ &= \mathbf{W}^{l-1} \left(\mathbf{W}^{l-2} \dots \left(\mathbf{W}^{1} \mathbf{x}^{0} + \mathbf{b}^{1}\right) \dots + \mathbf{b}^{l-2}\right) + \mathbf{b}^{l-1} \\ &= \prod_{k=1}^{l-1} \mathbf{W}^{k} \mathbf{x}^{0} + \prod_{k=2}^{l-1} \mathbf{W}^{k} \mathbf{b}^{1} + \dots + \mathbf{b}^{l-1} \\ &= \mathbf{J}^{0,l-1} \mathbf{x}^{0} + \mathbf{a}, \end{aligned} \tag{16}
324
+ $$
325
+
326
+ where $\mathbf{a}$ collects the bias contributions and does not depend on $\mathbf{x}^0$. Hence, the proof is complete.
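+
+ As a sanity check, Equation 9 can also be verified numerically on a toy network. The sketch below is illustrative rather than taken from the paper: the layer widths, the tanh nonlinearity, and the squared-sum loss are arbitrary assumptions, and JAX is used only to obtain reference gradients and Jacobians. It compares the autodiff gradient of one layer with $\epsilon \mathbf{J}^{l,K}\mathbf{D}^{l}\otimes \mathbf{x}^{l-1}$, written as an outer product in matrix form.
+
+ ```python
+ import jax
+ import jax.numpy as jnp
+
+ key = jax.random.PRNGKey(0)
+ sizes = [4, 5, 5, 3]                          # widths N_0, ..., N_K (K = 3 layers); arbitrary
+ keys = jax.random.split(key, len(sizes))
+ Ws = [jax.random.normal(keys[i], (sizes[i + 1], sizes[i])) / jnp.sqrt(sizes[i])
+       for i in range(len(sizes) - 1)]
+ bs = [jnp.zeros(sizes[i + 1]) for i in range(len(sizes) - 1)]
+ x0 = jax.random.normal(keys[-1], (sizes[0],))
+
+ def forward_from(l, x):
+     # propagate x = x^l through layers l+1, ..., K
+     for W, b in zip(Ws[l:], bs[l:]):
+         x = jnp.tanh(W @ x + b)
+     return x
+
+ def loss_fn(params):
+     Ws_, bs_ = params
+     x = x0
+     for W, b in zip(Ws_, bs_):
+         x = jnp.tanh(W @ x + b)
+     return jnp.sum(x ** 2)                    # an arbitrary smooth loss
+
+ g_Ws, g_bs = jax.grad(loss_fn)((Ws, bs))      # reference gradients from autodiff
+
+ l = 2                                         # check layer l (1-indexed)
+ x_prev = x0                                   # x^{l-1}: forward through layers 1, ..., l-1
+ for W, b in zip(Ws[:l - 1], bs[:l - 1]):
+     x_prev = jnp.tanh(W @ x_prev + b)
+ h_l = Ws[l - 1] @ x_prev + bs[l - 1]
+ x_l = jnp.tanh(h_l)
+
+ D_l = jnp.diag(1.0 - x_l ** 2)                                      # D^l = diag(phi'(h^l)) for tanh
+ J_lK = jax.jacrev(lambda x: forward_from(l, x))(x_l)                # J^{l,K} = dx^K / dx^l
+ eps = jax.grad(lambda xK: jnp.sum(xK ** 2))(forward_from(l, x_l))   # error signal dL/dx^K
+
+ delta = eps @ J_lK @ D_l                      # row vector eps J^{l,K} D^l
+ g_eq9 = jnp.outer(delta, x_prev)              # the Kronecker product, written as an outer product
+ print(jnp.max(jnp.abs(g_eq9 - g_Ws[l - 1])))  # expected to be near float32 machine precision
+ ```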
327
+
328
+ ![](images/a15fd43979184f10a9b26a2d80130d63ecae216c8e1988bdca303df178bd4640.jpg)
329
+
330
+ # B EXPERIMENT SETTINGS
331
+
332
+ Pruning at initialization. By default, we perform pruning at initialization based on connection sensitivity scores as in Lee et al. (2019). When computing connection sensitivity, we always use all examples in the training set, to avoid stochasticity introduced by a particular mini-batch. Unless stated otherwise, we set the default sparsity level to $\bar{\kappa} = 90\%$ (i.e., $90\%$ of all parameters in the network are pruned away). For all tested architectures, pruning to this sparsity level does not lead to a large accuracy drop. For comparison, we additionally perform either random pruning (at initialization) or magnitude-based pruning (on a pretrained model). Random pruning prunes parameters randomly and globally for a given sparsity level. For magnitude-based pruning, we first train a model and then prune parameters globally in a single shot based on the magnitude of the pretrained parameters (i.e., large weights are kept while small ones are pruned). For initialization, we follow either variance scaling schemes (VS-L, VS-G, VS-H, as in LeCun et al. (1998), Glorot & Bengio (2010), and He et al. (2015), respectively) or (convolutional) orthogonal initialization schemes (Saxe et al., 2014; Xiao et al., 2018).
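+
+ For concreteness, the following is a minimal sketch of single-shot connection sensitivity pruning in the spirit of Lee et al. (2019). The `loss_fn`, the parameter pytree, and the function names are assumptions for illustration (SNIP additionally normalizes the scores, which does not change the top-k selection), and the full training set is passed as a single `data` argument for brevity.
+
+ ```python
+ import jax
+ import jax.numpy as jnp
+
+ def connection_sensitivity(loss_fn, params, data):
+     # Surrogate for |dL/dc| with c the (all-ones) connection masks at init,
+     # which reduces to |w * dL/dw| evaluated at the initial parameters.
+     grads = jax.grad(loss_fn)(params, data)
+     return jax.tree_util.tree_map(lambda w, g: jnp.abs(w * g), params, grads)
+
+ def global_topk_masks(scores, sparsity=0.9):
+     # Keep the (1 - sparsity) fraction of connections with the highest
+     # sensitivity, chosen globally across all layers.
+     leaves, treedef = jax.tree_util.tree_flatten(scores)
+     flat = jnp.concatenate([s.ravel() for s in leaves])
+     k = int((1.0 - sparsity) * flat.size)
+     threshold = jnp.sort(flat)[-k]
+     masks = [(s >= threshold).astype(jnp.float32) for s in leaves]
+     return jax.tree_util.tree_unflatten(treedef, masks)
+
+ # Usage (assuming a `loss_fn(params, data)` and freshly initialized `params`):
+ # scores = connection_sensitivity(loss_fn, params, full_training_set)
+ # masks = global_topk_masks(scores, sparsity=0.9)   # kappa-bar = 90%
+ ```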
333
+
334
+ Training and evaluation. Throughout the experiments, we evaluate pruning results on the MNIST, CIFAR-10, and Tiny-ImageNet image classification tasks. For training the pruned sparse networks, we use SGD with momentum and train for up to 80k (MNIST) or 100k (CIFAR-10 and Tiny-ImageNet) iterations. The initial learning rate is 0.1 and is decayed by a factor of 10 every 20k (MNIST) or 25k (CIFAR-10 and Tiny-ImageNet) iterations. The mini-batch size is 100, 128, and 200 for MNIST, CIFAR-10, and Tiny-ImageNet, respectively. We do not tune anything specific to a particular case, and follow the standard training procedure. For all experiments, we hold out $10\%$ of the training set for validation, which corresponds to 5400, 5000, and 9000 images for MNIST, CIFAR-10, and Tiny-ImageNet, respectively. We evaluate every 1k iterations and record the lowest test error. All results are averages over either 10 (MNIST) or 5 (CIFAR-10 and Tiny-ImageNet) runs.
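+
+ For reference, the schedule above can be summarized in a small configuration sketch; the values are transcribed from this paragraph, while the dictionary keys are illustrative and not taken from the paper's code.
+
+ ```python
+ # Per-dataset training settings as described above (illustrative key names).
+ TRAIN_CONFIG = {
+     "mnist":         {"iterations": 80_000,  "batch_size": 100, "lr_decay_every": 20_000},
+     "cifar10":       {"iterations": 100_000, "batch_size": 128, "lr_decay_every": 25_000},
+     "tiny_imagenet": {"iterations": 100_000, "batch_size": 200, "lr_decay_every": 25_000},
+ }
+ COMMON = {"optimizer": "SGD with momentum", "init_lr": 0.1, "lr_decay_factor": 0.1,
+           "validation_fraction": 0.1, "eval_every": 1_000}
+ ```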
335
+
336
+ Signal propagation and approximate dynamical isometry. We use the entire training set when computing the Jacobian singular values of a network. To enforce approximate dynamical isometry when specified, given a pruned sparse network, we optimize the objective in Equation 8 using gradient descent. The learning rate is set to 0.1 and we perform 10k gradient update steps (although convergence is usually reached much earlier). This process is data-free and thus fast; depending on the size of the network and the number of update steps, it typically takes only a few seconds on a modern computer.
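+
+ A data-free sketch of this procedure is given below, under the assumption that Equation 8 penalizes the Frobenius-norm deviation of each masked layer's Gram matrix from the identity (the exact objective is defined earlier in the paper); the layer size, sparsity, and names are illustrative.
+
+ ```python
+ import jax
+ import jax.numpy as jnp
+
+ def isometry_penalty(W, mask):
+     # Squared Frobenius distance of the masked layer's Gram matrix from the identity.
+     Wm = W * mask
+     return jnp.sum((Wm.T @ Wm - jnp.eye(W.shape[1])) ** 2)
+
+ @jax.jit
+ def isometry_step(W, mask, lr=0.1):
+     # Plain gradient descent on the surviving weights only; the mask stays fixed.
+     g = jax.grad(isometry_penalty)(W, mask)
+     return W - lr * g * mask
+
+ key = jax.random.PRNGKey(0)
+ W = jax.random.normal(key, (100, 100)) / jnp.sqrt(100)
+ mask = (jax.random.uniform(jax.random.PRNGKey(1), W.shape) > 0.9).astype(W.dtype)  # ~90% sparsity
+ for _ in range(10_000):       # the step budget stated above; convergence is usually much earlier
+     W = isometry_step(W, mask)
+
+ # Singular values of the resulting masked layer, for inspecting signal propagation.
+ svals = jnp.linalg.svd(W * mask, compute_uv=False)
+ ```
+
+ Because the penalty and its gradient involve no data, the loop above runs in seconds for typical layer sizes, which matches the data-free character of the procedure described above.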
337
+
338
+ Neural architecture sculpting. We provide the model details in Table 6.
339
+
340
+ Table 6: All models (Equivalents 1, 2, 3) are initially bigger than the base network (ResNet20), by being either wider or deeper, but are pruned to have the same number of parameters as the base network (269k). The widening factor (k) refers to the filter multiplier; e.g., for the basic filter size of 16, a widening factor of $\mathrm{k} = 2$ results in 32 filters. The block size refers to the number of residual blocks in each block layer; all models have three block layers. More/fewer residual blocks mean the network is deeper/shallower. The reported generalization errors are averages over 5 runs. We find that architecture sculpting, i.e., pruning randomly initialized neural networks based on our signal propagation perspective even in the absence of ground-truth supervision, can be used to find models of superior performance under the same parameter budget.
341
+
342
+ <table><tr><td>Model category</td><td>Shape</td><td>Widening (k)</td><td>Block size</td><td>Init.</td><td>GT</td><td>Sparsity</td><td>Error</td></tr><tr><td>Base</td><td>ResNet20 (He et al., 2016)</td><td>1</td><td>3</td><td>VS-H</td><td>✓</td><td>0.0</td><td>8.046</td></tr><tr><td rowspan="4">Equivalent 1</td><td>wider</td><td>2</td><td>3</td><td>LDI</td><td>✗</td><td>74.8</td><td>7.618</td></tr><tr><td>wider</td><td>4</td><td>3</td><td>LDI</td><td>✗</td><td>93.7</td><td>7.630</td></tr><tr><td>wider</td><td>6</td><td>3</td><td>LDI</td><td>✗</td><td>97.2</td><td>7.708</td></tr><tr><td>wider</td><td>8</td><td>3</td><td>LDI</td><td>✗</td><td>98.4</td><td>7.836</td></tr><tr><td rowspan="3">Equivalent 2</td><td>wider &amp; shallower</td><td>2</td><td>2</td><td>LDI</td><td>✗</td><td>60.4</td><td>7.776</td></tr><tr><td>wider &amp; shallower</td><td>4</td><td>2</td><td>LDI</td><td>✗</td><td>90.1</td><td>7.876</td></tr><tr><td>wider &amp; shallower</td><td>6</td><td>2</td><td>LDI</td><td>✗</td><td>95.6</td><td>7.940</td></tr><tr><td>Equivalent 3</td><td>deeper</td><td>1</td><td>5</td><td>LDI</td><td>✗</td><td>42.0</td><td>7.912</td></tr></table>
343
+
344
+ # C SIGNAL PROPAGATION IN SPARSE NETWORKS: ADDITIONAL RESULTS
345
+
346
+ ![](images/75d58dfbf1029ecf58e1d394ba0ceb404e083098d20e49ede0eaee48e6770fe3.jpg)
347
+ linear $(K = 7)$
348
+
349
+ ![](images/c826844ef9561bbeecb978a08dddea69879e7e398860d6480717ecff9579f6cf.jpg)
350
+ tanh $(K = 7)$
351
+
352
+ ![](images/4620c22575a6c5cc26ec300230b86acb65154fecd70deda71e257d66af03ee25.jpg)
353
+ linear $(K = 7)$
354
+
355
+ ![](images/2e64de07f82287ae0f8abbbe5b2aeb502a1354457ca378e14bcb0ca3b79d362a.jpg)
356
+ linear $(K = 7)$
357
+
358
+ ![](images/11559eca14468b37fdf6528356aeedfcbfe61e98fe92f9e39ac3d43d4ac494b1.jpg)
359
+ tanh $(K = 7)$
360
+
361
+ ![](images/530344968100a24bfb877a3c468734d47baf504f3a01577fdea6785424acfc44.jpg)
362
+ (a) Signal propagation (all statistics)
363
+
364
+ ![](images/9b46de0c83366488c03885239a32a61aa5d66321bf45d7438533d2ab3de13192.jpg)
365
+ tanh $(K = 7)$
366
+
367
+ ![](images/914f53e16e1cd5809259789e5197bbca3565beb29f7b4a1f007d606f6a03a9fe.jpg)
368
+ tanh $(K = 7)$
369
+
370
+ ![](images/e8889add26d30b68a09151224307268850156c68a9cc192008eb5235e81bc4e1.jpg)
371
+ linear $(K = 7)$
372
+
373
+ ![](images/55351ab648f6aac6d481dcd0ee5bea744a1d7b9ba2114f69363f06d65713401e.jpg)
374
+ linear $(K = 7)$
375
+ (b) Training behavior (loss and accuracy)
376
+
377
+ ![](images/47f7060486220bd52b0edbc36b2951b4cc3f30074c47656ab2f8d7203394c4db.jpg)
378
+ tanh $(K = 7)$
379
+
380
+ ![](images/467a8d6793d3c65aa7327ec01877866c22b247f2737acca3a73fb983a34632e6.jpg)
381
+ tanh $(K = 7)$
382
+ Figure 4: Full results for (a) signal propagation (all singular value statistics) and (b) training behavior (including accuracy) for 7-layer linear and tanh MLP networks. We provide results of LDI-Rand, LDI-Rand-AI, VS-CS, LDI-CS, and LDI-CS-AI on the linear case for both singular value statistics and training logs. We also plot results of LDI-Mag and LDI-Dense on the tanh case for trainability; the training results of non-pruned (LDI-Dense) and magnitude (LDI-Mag) pruning are only reported for the tanh case, because the learning rate had to be lowered for the linear case (otherwise training diverges), which makes the comparison not entirely fair. We provide the singular value statistics for magnitude pruning in Figure 6 to avoid clutter. Extended training logs for random and magnitude-based pruning are provided separately in Figure 5 to illustrate the difference in convergence speed.
383
+
384
+ ![](images/5602bd04bce9a2cd4e84fbe41b46fc19e6dab63f386f670d33d8f9037bdb6d0b.jpg)
385
+ (a) Training behavior
386
+
387
+ ![](images/d42e0d172125fbb9829cbb36d0006ee8611ef86484e3edc05e68b7769fa08058.jpg)
388
+
389
+ ![](images/1407e418e128913380020fa5feaca4621e7e0478b063985c2460e4d17e684597.jpg)
390
+
391
+ ![](images/8a4603b34f36db4137a5608206e5edc97e39bc51eee90d5ce74236e03f03974f.jpg)
392
+ Figure 5: Extended training logs (i.e., loss and accuracy) for random (Rand) and magnitude (Mag) pruning. The sparse networks obtained by random or magnitude pruning take much longer to train than those obtained by pruning based on connection sensitivity. All methods are pruned at the layerwise orthogonal initialization and trained the same way as before.
393
+
394
+ ![](images/8649690c9dcce16e7c04dfd6292bf5b77c8a3b83c49035c132dfa5e38c6a84e8.jpg)
395
+
396
+ ![](images/dbf94f9e048ef22407b807ea8a14e1a0efe1fe734ba030f0e748016966cf2b5c.jpg)
397
+
398
+ ![](images/e28b95f17ab91445b5670399428d7f2e4e5d942dae07fe5419fc5bbe5bddff2d.jpg)
399
+ (a) Signal propagation (all statistics; magnitude based pruning)
400
+
401
+ ![](images/afd93c049b2584daa4035474d60e8bf66f2842320026607f4cf9da5dcba668e1.jpg)
402
+
403
+ ![](images/af60b59dc9e0de90666bb8c5ff3bc6fe4588c732d740c0b6d5affbc9ea2521d4.jpg)
404
+
405
+ ![](images/80ded1d01714665490d0fe2a552f6c2e6f547cd36e952ccc7ff46b4b8972520b.jpg)
406
+
407
+ Figure 6: Signal propagation measurements (all singular value statistics) for magnitude-based pruning (Mag) on the 7-layer linear and tanh MLP networks. As described in the experiment settings in Appendix B, magnitude-based pruning is performed on a pretrained model. Notice that, unlike the other cases where pruning is done at initialization (i.e., using either random or connection sensitivity based pruning), the singular value distribution changes abruptly when pruned (note the sharp change of singular values from 0 to $10\%$ sparsity). Also, the singular values are not concentrated (note the high standard deviations), which explains the rather inferior trainability compared to other methods. We conjecture that naively pruning based on the magnitude of parameters in a single shot, without pruning gradually or employing sophisticated tricks such as layerwise thresholding, can lead to a failure to train the compressed network.
408
+
409
+ ![](images/88d0cdc4eb3610675ae0a44158f8d9523fbed446a7ac0a126980d7aa54810c94.jpg)
410
+ Figure 7: Signal propagation and training behavior for ReLU and Leaky-ReLU activation functions. The results resemble those of the tanh case in Figure 2, and hence the same conclusion holds.
411
+
412
+ ![](images/2d35c1aa87cbbd9106e26ee9ef038495e2bb03188302af789a0b36bd9d378833.jpg)
413
+
414
+ ![](images/f96c9ee1c4c80d11a6169f8270290899291cd251c58ce7aabf43aa234d5a4e75.jpg)
415
+
416
+ ![](images/8e07c1178bafa5aa8f414c70f5260c01822073e76764c5b2cac529ee1596de32.jpg)
417
+
418
+ ![](images/68dfd445c93d88bdf3d3b0bcc3541af01991bc966c7e257a4f36470e0a60eb88.jpg)
419
+
420
+ ![](images/de42a2545879dab4e551becf2743ee0635fbda2a005cb1a744cdba8d3db7c6f5.jpg)
421
+ Figure 8: Training performance (loss and accuracy) of different methods for VGG16 on CIFAR-10. To examine the effect of initialization on the trainability of sparse neural networks in isolation, we remove batch normalization (BN) layers for this experiment, as BN tends to improve training speed as well as generalization performance. As a result, enforcing approximate isometry (LDI-CS-AIF) improves the training speed quite dramatically compared to the pruned network without isometry (LDI-CS). We also find that, even compared to the non-pruned dense network (LDI-Dense), which is ensured layerwise dynamical isometry, LDI-CS-AIF trains faster in the early training phase. This result is quite promising and more encouraging than the previous MLP case (see Figures 2 and 7), as it potentially indicates that an underparameterized network (obtained by connection sensitivity pruning) can even outperform an overparameterized network, at least in the early phase of neural network training. Furthermore, we add results of using the spectral norm when enforcing approximate isometry in Equation 8 (LDI-CS-AIS), and find that it also trains faster than the case of broken isometry (LDI-CS), but not as fast as when using the Frobenius norm (LDI-CS-AIF).
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c10718393a8969c45e3edba5b7faa11a2d52c2b13ff4d3f941a1df67770817a
3
+ size 904466
asignalpropagationperspectiveforpruningneuralnetworksatinitialization/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:567a59f9841cc25234799c0f4225e2a6458adb1ebe4793907549a905e6d252de
3
+ size 554312
asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f69f80ef6670d8f0d3744b38f3e38c14e22c2396995d40f104175197378eb83c
3
+ size 261183
asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:752f1fc483e9547552aec53889abab98670a344bc3df10a42c09bb51d06502ce
3
+ size 306467
asymptoticsofwidenetworksfromfeynmandiagrams/95e4fdf1-a2a2-4bcc-8d43-612412d85781_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f5a3460d6c7f61d5083f355a5ba9e90f494b6250956dab6a12d25dcdb7d0a27
3
+ size 1179589
asymptoticsofwidenetworksfromfeynmandiagrams/full.md ADDED
The diff for this file is too large to render. See raw diff
 
asymptoticsofwidenetworksfromfeynmandiagrams/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3cd65dae2720d554dfb9875d3caf1868605104f713d4887129ad031995af71c
3
+ size 1521286
asymptoticsofwidenetworksfromfeynmandiagrams/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94467cc680d726a6e2901851dc086332cb78f065bb8c879aadbc479ab14f6185
3
+ size 1790289
atstabilitysedgehowtoadjusthyperparameterstopreserveminimaselectioninasynchronoustrainingofneuralnetworks/69a3b900-5a48-447f-89ee-4bb1ead753e3_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68700f7efabc38863e8be8908c011bcfcfa8fde398e6a0599f9e31dec1d64ca5
3
+ size 131854
atstabilitysedgehowtoadjusthyperparameterstopreserveminimaselectioninasynchronoustrainingofneuralnetworks/69a3b900-5a48-447f-89ee-4bb1ead753e3_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81e034d868ec4c8475cc005ace101a78154d3a57ac5309f1e21c02233c7fd6ed
3
+ size 153092