SlowGuess committed

Commit 04df119 · verified · 1 Parent(s): 1d00f9f

Add Batch 0e7bb813-ada6-4a4a-8c5f-6b0e7c64b6ad

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.

Files changed (50)
  1. stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_content_list.json +3 -0
  2. stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_model.json +3 -0
  3. stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_origin.pdf +3 -0
  4. stochasticconditionalgenerativenetworkswithbasisdecomposition/full.md +591 -0
  5. stochasticconditionalgenerativenetworkswithbasisdecomposition/images.zip +3 -0
  6. stochasticconditionalgenerativenetworkswithbasisdecomposition/layout.json +3 -0
  7. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_content_list.json +3 -0
  8. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_model.json +3 -0
  9. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_origin.pdf +3 -0
  10. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/full.md +265 -0
  11. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/images.zip +3 -0
  12. stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/layout.json +3 -0
  13. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_content_list.json +3 -0
  14. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_model.json +3 -0
  15. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_origin.pdf +3 -0
  16. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/full.md +226 -0
  17. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/images.zip +3 -0
  18. structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/layout.json +3 -0
  19. structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_content_list.json +3 -0
  20. structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_model.json +3 -0
  21. structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_origin.pdf +3 -0
  22. structpoolstructuredgraphpoolingviaconditionalrandomfields/full.md +279 -0
  23. structpoolstructuredgraphpoolingviaconditionalrandomfields/images.zip +3 -0
  24. structpoolstructuredgraphpoolingviaconditionalrandomfields/layout.json +3 -0
  25. structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_content_list.json +3 -0
  26. structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_model.json +3 -0
  27. structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_origin.pdf +3 -0
  28. structuredobjectawarephysicspredictionforvideomodelingandplanning/full.md +380 -0
  29. structuredobjectawarephysicspredictionforvideomodelingandplanning/images.zip +3 -0
  30. structuredobjectawarephysicspredictionforvideomodelingandplanning/layout.json +3 -0
  31. subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_content_list.json +3 -0
  32. subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_model.json +3 -0
  33. subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_origin.pdf +3 -0
  34. subpolicyadaptationforhierarchicalreinforcementlearning/full.md +403 -0
  35. subpolicyadaptationforhierarchicalreinforcementlearning/images.zip +3 -0
  36. subpolicyadaptationforhierarchicalreinforcementlearning/layout.json +3 -0
  37. symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_content_list.json +3 -0
  38. symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_model.json +3 -0
  39. symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_origin.pdf +3 -0
  40. symplecticodenetlearninghamiltoniandynamicswithcontrol/full.md +604 -0
  41. symplecticodenetlearninghamiltoniandynamicswithcontrol/images.zip +3 -0
  42. symplecticodenetlearninghamiltoniandynamicswithcontrol/layout.json +3 -0
  43. synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_content_list.json +3 -0
  44. synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_model.json +3 -0
  45. synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_origin.pdf +3 -0
  46. synthesizingprogrammaticpoliciesthatinductivelygeneralize/full.md +549 -0
  47. synthesizingprogrammaticpoliciesthatinductivelygeneralize/images.zip +3 -0
  48. synthesizingprogrammaticpoliciesthatinductivelygeneralize/layout.json +3 -0
  49. tabfactalargescaledatasetfortablebasedfactverification/6902c8b0-acd5-41b8-8542-8ac0af074538_content_list.json +3 -0
  50. tabfactalargescaledatasetfortablebasedfactverification/6902c8b0-acd5-41b8-8542-8ac0af074538_model.json +3 -0
stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_content_list.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4ba8696109b1d63dd389fb8aec44d4a3a211a7cf7d0eaa47fc6584a7e295710a
+ size 129065
stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_model.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d9349ce1358def22bd97fbb6c8f30df50a5e8a0ec4b1c708dd473446ff988fd
+ size 138147
stochasticconditionalgenerativenetworkswithbasisdecomposition/7181bcf9-4616-486b-b50c-3b2c94650efb_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71956661d37503e0b42c497859c8cf30b5ef0e33bd76bb73ea44fdcf259d9edb
+ size 17311611
stochasticconditionalgenerativenetworkswithbasisdecomposition/full.md ADDED
@@ -0,0 +1,591 @@
# STOCHASTIC CONDITIONAL GENERATIVE NETWORKS WITH BASIS DECOMPOSITION

Ze Wang, Xiuyuan Cheng, Guillermo Sapiro, Qiang Qiu

Duke University

{ze.w, xiuyuan.cheng, guillermo.sapiro, qiang.qiu}@duke.edu

# ABSTRACT

While generative adversarial networks (GANs) have revolutionized machine learning, a number of open questions remain to fully understand them and exploit their power. One of these questions is how to efficiently achieve proper diversity and sampling of the multi-mode data space. To address this, we introduce BasisGAN, a stochastic conditional multi-mode image generator. By exploiting the observation that a convolutional filter can be well approximated as a linear combination of a small set of basis elements, we learn a plug-and-play basis generator that stochastically generates basis elements, with just a few hundred parameters, to fully embed stochasticity into convolutional filters. By sampling basis elements instead of filters, we dramatically reduce the cost of modeling the parameter space without sacrificing either image diversity or fidelity. To illustrate this proposed plug-and-play framework, we construct variants of BasisGAN based on state-of-the-art conditional image generation networks, and train the networks by simply plugging in a basis generator, without additional auxiliary components, hyperparameters, or training objectives. The experimental success is complemented with theoretical results indicating how the perturbations introduced by the proposed sampling of basis elements can propagate to the appearance of generated images.
# 1 INTRODUCTION

Conditional image generation networks learn mappings from the condition domain to the image domain by training on massive samples from both domains. The mapping from a condition, e.g., a map, to an image, e.g., a satellite image, is essentially one-to-many, as illustrated in Figure 1. In other words, there exist many plausible output images that satisfy a given input condition, which motivates us to explore multi-mode conditional image generation that produces diverse images conditioned on one single input condition.
One technique to improve image generation diversity is to feed the image generator an additional latent code, in the hope that such a code can carry information that is not covered by the input condition, so that diverse output images are achieved by decoding the missing information conveyed through different latent codes. However, as illustrated in the seminal work Isola et al. (2017), encoding the diversity with an input latent code can lead to unsatisfactory performance for the following reasons. During training, objectives like the GAN loss Goodfellow et al. (2014) and regularizations like the L1 loss Isola et al. (2017) and perceptual loss Wang et al. (2018) are imposed to improve both visual fidelity and correspondence to the input condition. However, no similar regularization is imposed to enforce the correspondence between outputs and latent codes, so the network is prone to ignore input latent codes during training and produce identical images from an input condition even with different latent codes. Several methods have been proposed to explicitly encourage the network to take input latent codes into account to encode diversity. For example, Mao et al. (2019) explicitly maximizes the ratio of the distance between generated images to the distance between the corresponding latent codes, while Zhu et al. (2017b) applies an auxiliary network for decoding the latent codes from the generated images. Although the diversity of the generated images is significantly improved, these methods have drawbacks. In Mao et al. (2019), at least two samples generated from the same condition are needed for calculating the regularization term, which multiplies the memory footprint of each mini-batch during training. The auxiliary network structures and training objectives in Zhu et al. (2017b) unavoidably increase training difficulty and memory footprint. These previously proposed methods usually require considerable modifications to the underlying framework.
![](images/139650e49336467bb00eebcd8648ffa3e6ec7168c56891f9b9237405a4f065f7.jpg)
Figure 1: Illustration of the proposed BasisGAN. The diversity of generated images is achieved by parameter generation in the stochastic sub-model, where basis generators take samples from a prior distribution and generate low dimensional basis elements from the learned spaces. The sampled basis elements are linearly combined using the deterministic basis coefficients and used to reconstruct the convolutional filters. Filters in each stochastic layer are modeled with a separate basis generator. By convolving the same feature from the deterministic sub-model with different convolutional filters, images with diverse appearances are generated.
In this paper, we propose a stochastic model, BasisGAN, that directly maps an input condition to diverse output images, aiming at building networks that model the multiple modes intrinsically. The proposed method exploits the known observation that a well-trained deep network can converge to significantly different sets of parameters across multiple trainings, due to factors such as different parameter initializations and different choices of mini-batches. Therefore, instead of treating a conditional image generation network as a deterministic function with fixed parameters, we propose modeling the filters in each convolutional layer as samples from a filter space, and learning the corresponding filter space using a tiny network for efficient and diverse filter sampling. In Ghosh et al. (2018), parameter non-uniqueness is used for multi-mode image generation by training several generators with different parameters simultaneously as a multi-agent solution. However, the maximum number of modes in Ghosh et al. (2018) is restricted by the number of agents, and the replication increases memory as well as computational cost. Based on the above parameter non-uniqueness property, we introduce into a deep network stochastic convolutional layers, where filters are sampled from learned filter spaces. Specifically, we learn the mapping from a simple prior to the filter space using neural networks, here referred to as filter generators. To empower a deterministic network with multi-mode image generation, we divide the network into a deterministic sub-model and a stochastic sub-model as shown in Figure 1, where standard convolutional layers and stochastic convolutional layers with filter generators are deployed, respectively. By optimizing an adversarial loss, filter generators can be jointly trained with a conditional image generation network. In each forward pass, filters at stochastic layers are sampled by filter generators. Highly diverse images conditioned on the same input are achieved by joint sampling of filters in multiple stochastic convolutional layers.
However, the filters of a convolutional layer are usually high-dimensional when written together as one vector, which makes the modeling and sampling of a filter space highly costly in practice in terms of training time, sampling time, and filter generator memory footprint. Based on the low-rank property observed in sampled filters, we decompose each filter as a linear combination of a small set of basis elements Qiu et al. (2018), and propose to only sample low-dimensional spatial basis elements instead of filters. By replacing filter generators with basis generators, the proposed method becomes highly efficient and practical. Theoretical arguments are provided on how perturbations introduced by sampling basis elements can propagate to the appearance of generated images.
The proposed BasisGAN introduces a generalizable concept to promote diverse modes in conditional image generation. As basis generators act as plug-and-play modules, variants of BasisGAN can be easily constructed by replacing the standard convolutional layers in various state-of-the-art conditional image generation networks with stochastic layers with basis generators. Then, we directly train them without additional auxiliary components, hyperparameters, or training objectives on top of the underlying models. Experimental results consistently show that the proposed BasisGAN is a simple yet effective solution to multi-mode conditional image generation. We further empirically show that the inherent stochasticity introduced by our method allows training without paired samples, and one-to-many image-to-image translation is achieved using a stochastic auto-encoder where stochasticity prevents the network from learning a trivial identity mapping.
Our contributions are summarized as follows:
- We propose a plug-and-play basis generator to stochastically generate basis elements, with just a few hundred parameters, to fully embed stochasticity into network filters.
- Theoretical arguments are provided to support the simplification of replacing stochastic filter generation with basis generation.
- Both the generation fidelity and diversity of the proposed BasisGAN with basis generators are validated extensively, and state-of-the-art performance is consistently observed.
# 2 RELATED WORK

Conditional image generation. Parametric modeling of the natural image distribution has been studied for years, from restricted Boltzmann machines Smolensky (1986) to variational autoencoders Kingma & Welling (2013); in particular, variants with conditions Oord et al. (2016); Sohn et al. (2015); Van den Oord et al. (2016) show promising results. With the great power of GANs Goodfellow et al. (2014), conditional generative adversarial networks (cGANs) Isola et al. (2017); Pathak et al. (2016); Sangkloy et al. (2017); Wang et al. (2018); Xian et al. (2018); Zhu et al. (2017a) achieve great progress on visually appealing images given conditions. However, the quality of the images and their loyalty to input conditions come at a sacrifice in image diversity, as discussed in Zhu et al. (2017b), which is addressed by the proposed BasisGAN.
Multi-mode conditional image generation. To enable cGANs with multi-mode image generation, pioneering works like infoGAN Chen et al. (2016) and pix2pix Isola et al. (2017) propose to encode the diversity in an input latent code. To force the networks to take input latent codes into account, Zhu et al. (2017b) deploys auxiliary networks and training objectives to impose the recovery of the input latent code from the generated images. MSGAN Mao et al. (2019) and DSGAN Yang et al. (2019) propose regularization terms for diversity that enforce a larger distance between images generated from different input latent codes given one input condition. These methods require considerable modifications to the underlying original framework.
Neural network parameter generation and uncertainty. Generating network parameters using another network has been studied extensively since Hypernetworks Ha et al. (2016). As a seminal work on network parameter modeling, Hypernetworks successfully reduce learnable parameters by relaxing weight-sharing across layers. Follow-up works like Bayesian Hypernetworks Krueger et al. (2017) further introduce uncertainty to the generated parameters. Variational inference based methods like Bayes by Backprop Blundell et al. (2015) handle the intractable posterior distribution of parameters by assuming a prior (usually Gaussian). However, the assumed prior unavoidably degrades the expressiveness of the learned distribution. Parameter prediction for neural networks is intensively studied in the context of few-shot learning Bertinetto et al. (2016); Qiao et al. (2018); Wang et al. (2019), which aims to customize a network to a new task adaptively and efficiently in a data-driven way. Apart from few-shot learning, Denil et al. (2013) suggests parameter prediction as a way to study the redundancy in neural networks. While studying the representation power of random weights, Saxe et al. (2011) also suggests the uncertainty and non-uniqueness of network parameters. Another family of networks with uncertainty is based on variational inference Blundell et al. (2015), where an assumption on the distribution of network weights is imposed for tractable learning of that distribution. Works studying the relationship between local and global minima of deep networks Haeffele & Vidal (2015); Vidal et al. (2017) also suggest the non-uniqueness of the optimal parameters of a deep network.
# 3 STOCHASTIC FILTER GENERATION

A conditional generative adversarial network (cGAN) Mirza & Osindero (2014) learns the mapping from an input condition domain $\mathcal{A}$ to an output image domain $\mathcal{B}$ using a deep neural network. Conditional image generation is essentially a one-to-many mapping, as there can be multiple plausible instances $\mathbf{B} \in \mathcal{B}$ that satisfy a condition $\mathbf{A} \in \mathcal{A}$ Zhu et al. (2017b), corresponding to a distribution $p(\mathbf{B}|\mathbf{A})$. However, the naive mapping of a generator formulated by a neural network $G: \mathbf{A} \to \mathbf{B}$ is deterministic, and is incapable of covering the distribution $p(\mathbf{B}|\mathbf{A})$. We exploit the non-uniqueness of network parameters discussed above, and introduce stochasticity into convolutional filters through plug-and-play filter generators. To achieve this, we divide a network into two sub-models:
- A deterministic sub-model with convolutional filters $\phi$ that remain fixed after training;
- A stochastic sub-model whose convolutional filters $\mathbf{w}$ are sampled from parameter spaces modeled by neural networks $T$, referred to as filter generators, parametrized by $\theta$ with inputs $z$ from a prior distribution, e.g., $\mathcal{N}(0,I)$ for all experiments in this paper (a minimal sketch follows this list).
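A minimal PyTorch-style sketch of this two-sub-model setup is given below. The module names, layer widths, and tensor shapes are illustrative assumptions; the paper does not specify the filter generator architecture at this point.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class FilterGenerator(nn.Module):
    """Maps z ~ N(0, I) to a full set of convolutional filters w of shape
    (C_out, C_in, L, L). All sizes here are hypothetical."""
    def __init__(self, z_dim, c_out, c_in, kernel_size):
        super().__init__()
        self.shape = (c_out, c_in, kernel_size, kernel_size)
        n_out = c_out * c_in * kernel_size * kernel_size
        self.net = nn.Sequential(nn.Linear(z_dim, 128), nn.ReLU(),
                                 nn.Linear(128, n_out))

    def forward(self, z):
        return self.net(z).view(self.shape)

class StochasticConv2d(nn.Module):
    """A convolutional layer of the stochastic sub-model: its filters
    w = T_theta(z) are re-sampled on every forward pass."""
    def __init__(self, z_dim, c_out, c_in, kernel_size):
        super().__init__()
        self.z_dim = z_dim
        self.T = FilterGenerator(z_dim, c_out, c_in, kernel_size)

    def forward(self, x):
        z = torch.randn(self.z_dim, device=x.device)  # z ~ N(0, I)
        w = self.T(z)                                 # sampled filters
        return F.conv2d(x, w, padding=w.shape[-1] // 2)

# Two passes over the same input differ, because filters are re-sampled:
layer = StochasticConv2d(z_dim=16, c_out=8, c_in=3, kernel_size=3)
x = torch.randn(1, 3, 32, 32)
y1, y2 = layer(x), layer(x)
```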
Note that the filters in each stochastic layer are modeled with a separate neural network, which is not explicitly shown in the formulation for notational brevity. With this formulation, conditional image generation becomes $G_{\phi,\theta} : \mathbf{A} \to \mathbf{B}$, with stochasticity achieved by sampling filters $\mathbf{w} = T_{\theta}(z)$ for the stochastic sub-model in each forward pass. The conditional GAN loss Goodfellow et al. (2014); Mirza & Osindero (2014) then becomes
$$
\begin{array}{l} \min_{G} \max_{D} V(D, G) = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), \mathbf{B} \sim p(\mathbf{B}|\mathbf{A})} [\log(D(\mathbf{A}, \mathbf{B}))] + \tag{1} \\ \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), z \sim p(z)} [\log(1 - D(\mathbf{A}, G_{\phi,\theta}(\mathbf{A}; T_{\theta}(z))))], \end{array}
$$
where $D$ denotes a standard discriminator. Note that we represent the generator here as $G_{\phi,\theta}(\mathbf{A}; T_{\theta}(z))$ to emphasize that the generator uses stochastic filters $\mathbf{w} = T_{\theta}(z)$.

Given a stochastic generative network parametrized by $\phi$ and $\theta$, and an input condition $\mathbf{A}$, the generated images form a conditional probability $q_{\phi,\theta}(\mathbf{B}|\mathbf{A})$, so that (1) can be simplified as
$$
\begin{array}{l} \min_{G} \max_{D} V(D, G) = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), \mathbf{B} \sim p(\mathbf{B}|\mathbf{A})} \log D(\mathbf{A}, \mathbf{B}) + \tag{2} \\ \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), \mathbf{B} \sim q_{\phi,\theta}(\mathbf{B}|\mathbf{A})} \log[1 - D(\mathbf{A}, \mathbf{B})]. \end{array}
$$
When the optimal discriminator is achieved, (2) can be reformulated as
$$
C(G) = \max_{D} V(D, G) = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A})} [-\log(4) + 2 \cdot JSD(p(\mathbf{B}|\mathbf{A}) \,||\, q_{\phi,\theta}(\mathbf{B}|\mathbf{A}))], \tag{3}
$$
where $JSD$ is the Jensen-Shannon divergence (the proof is provided in the supplementary material). The global minimum of (3) is achieved when, for every sampled condition $\mathbf{A}$, the generator perfectly replicates the true distribution $p(\mathbf{B}|\mathbf{A})$, which indicates that by directly optimizing the loss in (1), conditional image generation with diversity is achieved with the proposed stochasticity in the convolutional filters.
To optimize (1), we train $D$ as in Goodfellow et al. (2014) to maximize the probability of assigning the correct label to both training examples and samples from $G_{\phi,\theta}$. Simultaneously, we train $G_{\phi,\theta}$ to minimize the following loss, where the filter generators $T_{\theta}$ are jointly optimized to bring stochasticity:
$$
\mathcal{L} = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), z \sim p(z)} [\log(1 - D(\mathbf{A}, G_{\phi,\theta}(\mathbf{A}; T_{\theta}(z))))]. \tag{4}
$$
We describe the optimization of the generator parameters $\{\phi, \theta\}$ in detail in Algorithm 1 of the supplementary material.
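As a rough illustration of the alternating updates implied by (1) and (4), consider the sketch below. `G` and `D` are assumed to be modules such as the ones sketched above, with `D` outputting probabilities; using the non-saturating generator loss is our substitution for the literal form of (4), not something stated in the paper.

```python
import torch
import torch.nn.functional as F

def train_step(G, D, opt_G, opt_D, A, B):
    """One alternating update of the minimax game in (1). Every call G(A)
    implicitly samples fresh filters w = T_theta(z) inside G."""
    # Discriminator step: maximize log D(A,B) + log(1 - D(A, G(A))).
    fake = G(A).detach()
    d_real, d_fake = D(A, B), D(A, fake)
    loss_D = F.binary_cross_entropy(d_real, torch.ones_like(d_real)) + \
             F.binary_cross_entropy(d_fake, torch.zeros_like(d_fake))
    opt_D.zero_grad(); loss_D.backward(); opt_D.step()

    # Generator step, eq. (4); the non-saturating form -log D(A, G(A))
    # is used here instead of log(1 - D(A, G(A))) for gradient stability.
    d_fake = D(A, G(A))
    loss_G = F.binary_cross_entropy(d_fake, torch.ones_like(d_fake))
    opt_G.zero_grad(); loss_G.backward(); opt_G.step()
    return loss_D.item(), loss_G.item()
```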
Discussion on diversity modeling in cGANs. The goal of a cGAN is to model the conditional probability $p(\mathbf{B}|\mathbf{A})$. Previous cGAN models Mao et al. (2019); Mirza & Osindero (2014); Zhu et al. (2017b) typically incorporate randomness in the generator by setting $\mathbf{B} = G(\mathbf{A}, z), z \sim p(z)$, where $G$ is a deep network with deterministic parametrization and the randomness is introduced via $z$, e.g., a latent code, as an extra input. This formulation implicitly makes the following two assumptions: (A1) the randomness of the generator is independent from that of $p(\mathbf{A})$; (A2) each realization $\mathbf{B}(\omega)$ conditional on $\mathbf{A}$ can be modeled by a CNN, i.e., $\mathbf{B} = G^{\omega}(\mathbf{A})$, where $G^{\omega}$ is a draw from an ensemble of CNNs, $\omega$ being the random event. (A1) is reasonable as long as the source of variation to be modeled by the cGAN is independent from that contained in $\mathbf{A}$, and the rationale for (A2) lies in the expressive power of CNNs for image to image translation. The previous model adopts a specific form of $G^{\omega}(\mathbf{A})$ via feeding random input $z(\omega)$ to $G$, yet one may observe that the most general formulation under (A1), (A2) would be to sample the generator itself from a certain distribution $p(G)$, which is independent from $p(\mathbf{A})$. Since generative CNNs are parametrized by convolutional filters, this would be equivalent to setting $\mathbf{B} = G(\mathbf{A};\mathbf{w})$, $\mathbf{w} \sim p(\mathbf{w})$, where we use ";" in the parentheses to emphasize that what follows it is the parametrization of the generator. The proposed cGAN model in the current paper indeed takes such an approach, where we model $p(\mathbf{w})$ by a separate filter generator network.
# 4 STOCHASTIC BASIS GENERATION

Using the method above, the filters $\mathbf{w}$ of each stochastic layer are generated in the form of a high-dimensional vector of size $L \times L \times C' \times C$, where $L$, $C'$, and $C$ correspond to the kernel size and the numbers of input and output channels, respectively. Although directly generating such high-dimensional vectors is feasible, it can be highly costly in terms of training time, sampling time, and memory footprint as the network scale grows. We present a thorough comparison in terms of generation quality and sampled filter size in supplementary material Figure A.1, where it is clearly shown that filter generation is too costly to afford. In this section, we propose to replace filter generation with basis generation, achieving the quality/cost trade-off shown by the red dot in Figure A.1. Details on the memory, parameter number, and computational cost are also provided at the end of the supplementary material, Section G.
For convolutional filters, the weights $\mathbf{w}$ form a 3-way tensor involving a spatial index and two channel indices, for input and output channels respectively. Tensor low-rank decomposition cannot be defined in a unique way. For convolutional filters, a natural solution is then to separate out the spatial index, which leads to depth-separable network architectures Chollet (2017). Among other studies of low-rank factorization of convolutional layers, Qiu et al. (2018) proposes to approximate a convolutional filter using a set of prefixed basis elements linearly combined by learned reconstruction coefficients.
Given that the weights in convolutional layers may have a low-rank structure, we collect a large number of generated filters and reshape the stack of $N$ sampled filters into a 2-dimensional matrix $\mathbf{F}$ of size $J \times J'$, where $J = N \times L \times L$ and $J' = C' \times C$. We consistently observe that $\mathbf{F}$ is always of low effective rank, regardless of the network scale we use to estimate the filter distribution. If we assume that a collection of generated filters obeys such a low-rank structure, the following theorem proves that it suffices to generate bases in order to generate the desired distribution of filters.
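The effective-rank check described above can be reproduced in a few lines of numpy. The stacking below follows the construction of $\mathbf{F}$; the random placeholder data and the 1% singular-value threshold are our own assumptions (truly sampled filters, unlike random data, are the ones observed to be low-rank).

```python
import numpy as np

# Stack N sampled filters of shape (L, L, C', C) into a (N*L*L) x (C'*C)
# matrix F and inspect its effective rank via the singular values.
N, L, C_in, C_out = 100, 3, 16, 16
filters = np.random.randn(N, L, L, C_in, C_out)  # placeholder for sampled filters
F = filters.reshape(N * L * L, C_in * C_out)

s = np.linalg.svd(F, compute_uv=False)
effective_rank = int(np.sum(s / s[0] > 1e-2))  # values above 1% of the largest
print(effective_rank, "of", min(F.shape))
```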
Theorem 1. Let $(\Omega, \mathbb{P})$ be a probability space and $\mathbf{F} : \Omega \to \mathbb{R}^{L^2 \times C' \times C}$ a 3-way random tensor, where $\mathbf{F}$ maps each event $\omega$ to $\mathbf{F}^\omega(u, \lambda', \lambda)$, $u \in [L] \times [L]$, $\lambda' \in [C']$, $\lambda \in [C]$. For each fixed $\omega$ and $u$, $\mathbf{F}^\omega(u) := \{\mathbf{F}^\omega(u, \lambda', \lambda)\}_{\lambda', \lambda} \in \mathcal{L}(\mathbb{R}^{C'}, \mathbb{R}^C)$. If there exists a set of deterministic linear transforms $a_k, k = 1, \dots, K$ in $\mathcal{L}(\mathbb{R}^{C'}, \mathbb{R}^C)$ s.t. $\mathbf{F}^\omega(u) \in \text{Span}\{a_k\}_{k=1}^K$ for any $\omega$ and $u$, then there exist $K$ random vectors $\mathbf{b}_k : \Omega \to \mathbb{R}^{L^2}$, $k = 1, \dots, K$, s.t. $\mathbf{F}(u, \lambda', \lambda) = \sum_{k=1}^K \mathbf{b}_k(u) a_k(\lambda', \lambda)$ in distribution. If $\mathbf{F}$ has a probability density, then so do $\{\mathbf{b}_k\}_{k=1}^K$.
The proof of the theorem is provided in the supplementary material.
We simplify the expensive filter generation problem by decomposing each filter as a linear combination of a small set of basis elements, and then sampling basis elements instead of filters directly. In our method, we assume that the diverse modes of conditional image generation are essentially caused by spatial perturbations, and we thus propose to introduce stochasticity to the spatial basis elements. Specifically, we apply convolutional filter decomposition as in Qiu et al. (2018) to write $\mathbf{w} = \psi \mathbf{a}$, $\psi \in \mathbb{R}^{L \times L \times K}$, where $\psi$ are basis elements, $\mathbf{a}$ are decomposition coefficients, and $K$ is a pre-defined small value, e.g., $K = 7$. We keep the decomposition coefficients $\mathbf{a}$ deterministic and learn them directly from training samples. Instead of using predefined basis elements as in Qiu et al. (2018), we adopt a basis generator network $\mathcal{T}(\theta, z)$, parametrized by $\theta$, that learns the mapping from random latent vectors $z$ to basis elements $\psi$ with stochasticity. The basis generator networks are jointly trained with the main conditional image generation network in an end-to-end manner. Note that we inherit the term 'basis' from DCFNet Qiu et al. (2018) for the intuition behind the proposed framework, and we do not impose additional constraints such as orthogonality or linear independence on the generated elements. Sampling the basis elements $\psi$ using basis generators dramatically reduces the difficulty of modeling the corresponding probability distribution. The costly filter generators of Section 3 are now replaced by much more efficient basis generators, and stochastic filters are then constructed by linearly combining sampled basis elements with the deterministic coefficients, $\mathbf{w} = \psi \mathbf{a} = \mathcal{T}(\theta, z)\mathbf{a}$. The convolutional filter reconstruction is illustrated as part of Figure 1. As illustrated in this figure, BasisGAN is constructed by replacing convolutional layers with the proposed stochastic convolutional layers with basis generators, and the network parameters can be learned without any additional auxiliary training objective or regularization.
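A minimal sketch of such a stochastic layer follows, implementing $\mathbf{w} = \psi\mathbf{a} = \mathcal{T}(\theta, z)\mathbf{a}$. The tiny generator architecture, initialization, and default sizes are illustrative assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasisConv2d(nn.Module):
    """Stochastic layer with a basis generator: a tiny network T maps
    z ~ N(0, I) to K spatial basis elements psi of shape (K, L, L), which
    the deterministic, directly learned coefficients a combine into full
    filters w of shape (C_out, C_in, L, L)."""
    def __init__(self, c_in, c_out, kernel_size=3, K=7, z_dim=16):
        super().__init__()
        self.K, self.L, self.z_dim = K, kernel_size, z_dim
        # Basis generator: a few hundred parameters, instead of a network
        # that must emit all C_out*C_in*L*L filter weights.
        self.T = nn.Sequential(nn.Linear(z_dim, 32), nn.ReLU(),
                               nn.Linear(32, K * kernel_size * kernel_size))
        # Deterministic decomposition coefficients a.
        self.a = nn.Parameter(0.1 * torch.randn(c_out, c_in, K))

    def forward(self, x):
        z = torch.randn(self.z_dim, device=x.device)
        psi = self.T(z).view(self.K, self.L, self.L)    # sampled basis psi
        w = torch.einsum('oik,kxy->oixy', self.a, psi)  # w = psi a
        return F.conv2d(x, w, padding=self.L // 2)

layer = BasisConv2d(16, 16)
x = torch.randn(1, 16, 32, 32)
y1, y2 = layer(x), layer(x)  # diverse outputs from re-sampled bases
```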
# 5 EXPERIMENTS

In this section, we conduct experiments on multiple conditional generation tasks. Our primary objective is to show that, thanks to the inherent stochasticity of the proposed BasisGAN, multi-mode conditional image generation can be learned without any additional regularizations that explicitly promote diversity. The effectiveness of the proposed BasisGAN is demonstrated by quantitative and qualitative results on multiple tasks and underlying models. We start with a stochastic auto-encoder example to demonstrate the inherent stochasticity brought by the basis generator. Then we proceed to image to image translation tasks, and compare the proposed method with: the regularization based methods DSGAN Yang et al. (2019) and MSGAN Mao et al. (2019), which adopt explicit regularization terms that encourage a higher distance between output images with different latent codes; the model based method MUNIT Huang et al. (2018), which explicitly decouples appearance from content and achieves diverse image generation by manipulating the appearance code; and BicycleGAN Zhu et al. (2017b), which uses auxiliary networks to encourage the diversity of the generated images with respect to the input latent code. We further demonstrate that, as an essential way to inject randomness into conditional image generation, our method is compatible with existing regularization based methods, which can be adopted together with our proposed method for further performance improvements. Finally, ablation studies on the size of the basis generators and the effect of $K$ are provided in the supplementary material, Section E.
# 5.1 STOCHASTIC AUTO-ENCODER

The inherent stochasticity of the proposed BasisGAN allows learning a conditional one-to-many mapping even without paired samples for training. We validate this with a variant of BasisGAN referred to as a stochastic auto-encoder, which is trained to perform simple self-reconstruction with real-world images as inputs. Only an L1 loss and a GAN loss are imposed to promote fidelity and correspondence. However, thanks to the inherent stochasticity of BasisGAN, we observe that the network does not collapse to a trivial identity mapping, and diverse outputs with strong correspondence to the input images are generated with appealing fidelity. Some illustrative results are shown in Figure 2.
![](images/50c5646773854b6c72cb861164fd05cc4c09d4866e3a723cf75150536cda42b0.jpg)
![](images/7d0d6de1686764b2cd52139aa4a32e1340d3d881d64297d8009e7c155bcfcb84.jpg)
Input
![](images/de791ad012b861a996a476edcab8774fb6615170ee727bccfe827bbc09b7da68.jpg)
![](images/9a41056a226f37e8bc58baf4d6c317b67112a8651c217931c2afbe795096206e.jpg)
Generated diverse samples
![](images/3eeb4c11d4dfb4120ebb3bd4ddac13742c6b7f1cd8dea9412ac4f539f6b7d20c.jpg)
![](images/9e142cc244413a983591fd3562883ef2944916ea7a455c1128d5139478f242d1.jpg)
Input
![](images/30e532803b3a99782489c31a04ee6af32081a1de38d077f226f42b966b0c031c.jpg)
![](images/cb83cf5fc4fb2ad2eef27651c9cd76ead3a6df623a1ec1bfc0414762f312bb8c.jpg)
Generated diverse samples

Figure 2: Stochastic auto-encoder: one-to-many conditional image generation without paired samples. The network is trained directly to reconstruct the input real-world images, and the inherent stochasticity of the proposed method successfully promotes diverse output appearances with strong fidelity and correspondence to the inputs.
# 5.2 IMAGE TO IMAGE TRANSLATION

To faithfully validate the fidelity and diversity of the generated images, we follow Mao et al. (2019) and evaluate the performance quantitatively using the following metrics:
LPIPS. The diversity of generated images is measured using LPIPS Mao et al. (2019). LPIPS computes the distance between images in a feature space. Generated images with higher diversity give higher LPIPS scores, which are more favourable in conditional image generation.
![](images/1e42957f7dd07b261ff55edde69493e18f539a6e77fb7ff49270ae319e76f287.jpg)
![](images/6955150581b8b4a711592d8337f554f8d11d22888f892ec933d65f22a6b1487c.jpg)
![](images/1bcd58fa19364ff518cea36630d7914f598834eb9e7818ef16a055e63b2881b3.jpg)
![](images/b0bde2b96115d2a47e0bb4accd62bf2a4644353386ad119e27dbd0f85a079cdc.jpg)
Input
![](images/751789a59d654a8e6eaf7eec3e2fee24f503c19db5d22d787229457266541fba.jpg)
![](images/71d8d64dea6fb4fd59979501f8d530e085a689d64955ab45b1adda5f57642d75.jpg)
![](images/63d4a225e152634704c3486a1ce2b6f733726b71d539366f819455b579df2566.jpg)
![](images/edd4a84db4c5badc309b98d31ec361bb4c42fb630e880d376107394567ddec72.jpg)
Ground truth
![](images/2d9b08aa50b7fb5d0eefd7025285fa5df0acaebc9aa74ec68bd2540fc88155fb.jpg)
![](images/492ca5c1046092f8fe3a8a21073ae7416e1c3087d32e8e8c393cfac87fb30059.jpg)
![](images/bfd6906ee37addce7d826557261bb2af691faaee2aa4e69eacd5fe6cb06b7f0e.jpg)
![](images/eb57ef7797f0b55c33871f0deac9f47207f33256b72cfefeb1dd9a2d5b1314cc.jpg)
![](images/3ceadfb5a01d112b5464ce0dc88381a25bf755250c68d18f9a6b5af0d3d3d810.jpg)
![](images/3ad4ba6e45fc947a8929505f5b657e5c2877a3c4ac940b9b92c905b3be95470e.jpg)
![](images/628e5661f69042456691728cc1232e3b004ead80ef113f172abc55435ff54807.jpg)
![](images/b8950112b1179b8aa67a638f975730eb597120b3a087786298fa9a13ad514688.jpg)
Generated diverse samples
![](images/1efd92ae073e1ab38cd5c04ebe025580cf28cb25344a2614ecb8d2c92890469e.jpg)
![](images/9af87d8b98092a2ec12c262ea7f6f62c80f46f62a3e88c301adb30265bb4bc9f.jpg)
![](images/f32c0a4233ee6568003d108eac6d1e649ef2d2ce5fbd08a1aea4327b027f93d3.jpg)
![](images/08b497914692875e4d927dc72067af021149c093bb1d5f169e2351ba57cb467d.jpg)
![](images/2c006a146415ca2e2b36d2a66f88650157973cfdc9288d6b5ee7660261d03b41.jpg)
![](images/474d6f26186d39511a7b2e39a9715d776deb4696b2330329d28b72a077ec2be3.jpg)
![](images/9a1d295698dd0bae4e127e3ee01f73decb9b6f8cb1a6789ed894797a0c0e400b.jpg)
![](images/8a316066fa106882828472dc8e606fa9248b34ce83fa10e5e2ccd298a4d24e7a.jpg)
![](images/84ee8af224694324f08f00b3c7e723fa6006b9ed6aa6137f81a93961082d3d4b.jpg)
![](images/5162c563d1bfb21bfc2c3e31d6e2efcfe3dc4e05bdfe37ed3130be3608ea7465.jpg)
![](images/e077b65014bcc68e3e2d7ae655bd09ece69d7a4630d80e140b1fcb34e8837957.jpg)
![](images/0fd5af6c14fbab042be3e40f78ec879ccae4aa21f4e6a1270642328f169fc463.jpg)
![](images/a12e54eb018f917b08bfcc9ad068108dc28fa8e66a2aa6a546a73811d71219b9.jpg)
![](images/693faf68766a76ad6636c8a3ccd605922c7bf3e168e1023b4582624dc76fbb5d.jpg)
![](images/a40403bb0856a0cffe248feea77a68bb1ad4c32fca3376e892ad9014e586c159.jpg)
![](images/21549837caf6c4bf660243c9967bcb6b4901642b5dddba5df4f74e2e76cafd61.jpg)

Figure 3: BasisGAN adapted from Pix2Pix. The network is trained without any auxiliary loss functions or regularizations. From top to bottom, the image to image translation tasks are: edges $\rightarrow$ handbags, edges $\rightarrow$ shoes, maps $\rightarrow$ satellite, nights $\rightarrow$ days, facades $\rightarrow$ buildings. Additional examples are provided in the supplementary material, Figure A.2.

FID. FID Heusel et al. (2017) is used to measure the fidelity of the generated images. It computes the distance between the distribution of the generated images and that of the true images. Since the entire GAN family aims to faithfully model the true data distribution parametrically, a lower FID is favourable in our case since it reflects a closer fit to the desired distribution.
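As a sketch of how the LPIPS diversity score described above can be computed, the snippet below averages pairwise LPIPS distances over several samples generated from one condition, using the public `lpips` package. The generator `G`, the number of samples, and the image range convention are assumptions, not the authors' evaluation code.

```python
import itertools
import torch
import lpips  # pip install lpips

loss_fn = lpips.LPIPS(net='alex')  # feature-space distance

def diversity(G, A, n_samples=10):
    """Average pairwise LPIPS distance among samples generated from one
    condition A; G re-samples its basis elements on every call.
    Images are assumed to be tensors in [-1, 1]."""
    with torch.no_grad():
        samples = [G(A) for _ in range(n_samples)]
        dists = [loss_fn(x, y).mean().item()
                 for x, y in itertools.combinations(samples, 2)]
    return sum(dists) / len(dists)
```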
Pix2Pix $\rightarrow$ BasisGAN. As one of the most prevalent conditional image generation networks, Pix2Pix Isola et al. (2017) serves as a solid baseline for many multi-mode conditional image generation methods. It achieves conditional image generation by feeding the generator a conditional image, and training the generator to synthesize images with both a GAN loss and an L1 loss to the ground truth image. Typical applications for Pix2Pix include edge maps $\rightarrow$ shoes or handbags, maps $\rightarrow$ satellites, and so on. We adopt the ResNet based Pix2Pix model, and impose the proposed stochasticity in the successive residual blocks, where regular convolutional layers and convolutional layers with basis generators are applied to the feature maps alternately, as sketched below. The network is re-trained from scratch directly, without any extra loss functions or regularizations. Some samples are visualized in Figure 3. For a fair comparison with previous works Isola et al. (2017); Mao et al. (2019); Zhu et al. (2017b); Yang et al. (2019); Huang et al. (2018), we perform quantitative evaluations on image to image translation tasks, and the results are presented in Table 1. Qualitative comparisons are presented in Figure A.3. As discussed, all the state-of-the-art methods require considerable modifications to the underlying framework. By simply adding the proposed stochastic basis generators as plug-and-play modules to the Pix2Pix model, BasisGAN generates significantly more diverse images at a quality still comparable to other state-of-the-art methods. Moreover, as shown in Table A.3, BasisGAN reduces the number of trainable parameters compared to the underlying methods, thanks to the small number of basis elements and the tiny basis generator structures, whereas regularization based methods like Mao et al. (2019); Yang et al. (2019) keep the parameter counts of the underlying network models.
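A sketch of such a residual block is shown below, alternating a standard convolution with a `BasisConv2d` like the one sketched in Section 4. The normalization choice and exact layer placement are assumptions, not the authors' verbatim architecture.

```python
import torch.nn as nn

class StochasticResBlock(nn.Module):
    """ResNet-style block where a regular convolution and a stochastic,
    basis-generated convolution are applied alternately. BasisConv2d is
    the layer sketched in Section 4."""
    def __init__(self, channels):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1)  # deterministic
        self.conv2 = BasisConv2d(channels, channels)              # stochastic
        self.norm1 = nn.InstanceNorm2d(channels)
        self.norm2 = nn.InstanceNorm2d(channels)
        self.act = nn.ReLU(inplace=True)

    def forward(self, x):
        h = self.act(self.norm1(self.conv1(x)))
        h = self.norm2(self.conv2(h))
        return x + h
```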
Pix2PixHD $\rightarrow$ BasisGAN. In this experiment, we report results on high-resolution scenarios, which particularly demand efficiency and have not been previously studied by other conditional image generation methods.

We conduct high resolution image synthesis on Pix2PixHD Wang et al. (2018), which is proposed to conditionally generate images with resolution up to $2048 \times 1024$. The importance of this experiment arises from the fact that existing methods Mao et al. (2019); Zhu et al. (2017b) require considerable modifications to the underlying networks, which in this case are difficult to scale to very high resolution image synthesis due to the memory limits of modern hardware. Our method requires no auxiliary network structures or special batch formulation, and is thus easy to scale to large scale scenarios. Some generated samples are visualized in Figure 4. Quantitative results and comparisons against DSGAN Yang et al. (2019) are reported in Table 2.
Table 1: Quantitative results on image to image translation. Diversity and fidelity are measured using LPIPS and FID, respectively. Pix2Pix Isola et al. (2017), BicycleGAN Zhu et al. (2017b), MSGAN Mao et al. (2019), and DSGAN Yang et al. (2019) are included in the comparisons. DSGAN adopts a different setting (denoted as 20s in the table), generating 20 samples per input for computing the scores. We report results under both settings.

<table><tr><td>Dataset</td><td colspan="6">Labels → Facade</td></tr><tr><td>Methods</td><td>Pix2Pix</td><td>BicycleGAN</td><td>MSGAN</td><td>BasisGAN</td><td>DSGAN (20s)</td><td>BasisGAN (20s)</td></tr><tr><td>Diversity ↑</td><td>0.0003 ± 0.0000</td><td>0.1413 ± 0.0005</td><td>0.1894 ± 0.0011</td><td>0.2648 ± 0.004</td><td>0.18</td><td>0.2594 ± 0.004</td></tr><tr><td>Fidelity ↓</td><td>139.19 ± 2.94</td><td>98.85 ± 1.21</td><td>92.84 ± 1.00</td><td>88.7 ± 1.28</td><td>57.20</td><td>24.14 ± 0.76</td></tr><tr><td>Datasets</td><td colspan="6">Map → Satellite</td></tr><tr><td>Methods</td><td>Pix2Pix</td><td>BicycleGAN</td><td>MSGAN</td><td>BasisGAN</td><td>DSGAN (20s)</td><td>BasisGAN (20s)</td></tr><tr><td>Diversity ↑</td><td>0.0016 ± 0.0003</td><td>0.1150 ± 0.0007</td><td>0.2189 ± 0.0004</td><td>0.2417 ± 0.005</td><td>0.13</td><td>0.2398 ± 0.005</td></tr><tr><td>Fidelity ↓</td><td>168.99 ± 2.58</td><td>145.78 ± 3.90</td><td>152.43 ± 2.52</td><td>35.54 ± 2.19</td><td>49.92</td><td>28.92 ± 1.88</td></tr><tr><td></td><td>Dataset</td><td colspan="2">Edge → Handbag</td><td colspan="2">Edge → Shoe</td><td></td></tr><tr><td></td><td>Methods</td><td>MUNIT</td><td>BasisGAN</td><td>MUNIT</td><td>BasisGAN</td><td></td></tr><tr><td></td><td>Diversity ↑</td><td>0.32 ± 0.624</td><td>0.35 ± 0.810</td><td>0.217 ± 0.512</td><td>0.242 ± 0.743</td><td></td></tr><tr><td></td><td>Fidelity ↓</td><td>92.84 ± 0.121</td><td>88.76 ± 0.513</td><td>62.57 ± 0.917</td><td>64.17 ± 1.14</td><td></td></tr></table>
![](images/87f8ae8ce013dfa6d868cd85c8581c0fd8c9d9777befd4d6bafbaf874c47fe9b.jpg)
![](images/681854ec1e49e88d5e7aa450e053aa69ffa769601590b006fecc028c5e1c07a5.jpg)
![](images/26dfbf2a0d1ca71b93c204b8cc68939af56c637e384e869c5c310ee29e11815e.jpg)
![](images/61bb371cf5aee4ed48774ad2b73d36f58a204abccddbd97614f3474f6e1c7af9.jpg)

Figure 4: High resolution conditional image generation. Additional examples are provided in the supplementary material, Figure A.4.
BasisGAN significantly improves both diversity and fidelity, with little overhead in terms of training time, testing time, and memory.
Image inpainting. We conduct one-to-many image inpainting experiments on face images. Following Yang et al. (2019), centered face images in the celebA dataset are adopted, and parts of the faces are discarded by removing the center pixels. We adopt the exact same network used in Yang et al. (2019) and replace the convolutional layers with layers with basis generators. To show the plug-and-play compatibility of the proposed BasisGAN, we conduct experiments both by training BasisGAN alone and by combining BasisGAN with the regularization based method DSGAN (BasisGAN + DSGAN). When combining BasisGAN with DSGAN, we feed all the basis generators in BasisGAN with the same latent code, and use the distance between the latent codes and the distance between the generated samples to compute the regularization term proposed in Yang et al. (2019), as sketched below. Quantitative and qualitative results are in Table 3 and Figure 5, respectively. BasisGAN delivers a good balance between diversity and fidelity, while combining BasisGAN with the regularization based DSGAN further improves the performance.
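For illustration, a DSGAN-style diversity term computed from two samples that share a condition but received different latent codes might look as follows. The exact form and weighting in Yang et al. (2019) differ in details; treat this as a hedged sketch.

```python
import torch

def diversity_regularizer(img1, img2, z1, z2, eps=1e-5):
    """Encourage the image distance to grow with the latent distance by
    minimizing the negative ratio. img1/img2 are generated from the same
    condition with latent codes z1/z2 fed to all basis generators."""
    d_img = torch.mean(torch.abs(img1 - img2))
    d_z = torch.mean(torch.abs(z1 - z2))
    return -d_img / (d_z + eps)
```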
Table 2: Quantitative results on high resolution image to image translation. Diversity and fidelity are measured using LPIPS and FID, respectively.

<table><tr><td>Methods</td><td>Pix2PixHD</td><td>DSGAN</td><td>BasisGAN</td></tr><tr><td>Diversity ↑</td><td>0.0</td><td>0.12</td><td>0.168</td></tr><tr><td>Fidelity ↓</td><td>48.85</td><td>28.8</td><td>25.12</td></tr></table>

Table 3: Quantitative results on face inpainting. Diversity and fidelity are measured using LPIPS and FID, respectively.

<table><tr><td>Methods</td><td>DSGAN</td><td>BasisGAN</td><td>BasisGAN + DSGAN</td></tr><tr><td>Diversity ↑</td><td>0.05</td><td>0.062</td><td>0.073</td></tr><tr><td>Fidelity ↓</td><td>13.94</td><td>12.88</td><td>12.82</td></tr></table>
![](images/4b37f1a06270f652f5f030f0a6bca1659a5fd12c127223c8a2a8951f22d88501.jpg)
Input condition
![](images/3de25973be9bbc79407a9a2a8129a83e91b688ad07c04905b2b98b45d631e18d.jpg)
![](images/34247637f85e93fc305da4e283ef2ebcdc830ea9acc5645b6d139f92e429f532.jpg)
BasisGAN
![](images/56b3e9df32cdf9466c7f381e91d4d897dedeaae195ef5c5e44eb279ebb732f8b.jpg)
![](images/dcc67cbf507bcd962a742de5bdf7cc8494f786f55e467446516962f6ed40474e.jpg)
BasisGAN + DSGAN
![](images/65465baf9f270d8e0c9326fd52e1068419bc362a9e3287e572a60f5f45b47ff6.jpg)
![](images/cf9c673ef5c24d71eaa7b0e8f6cce6ac5b0114bccee7ad774e0d071f337c8c28.jpg)

Figure 5: Face inpainting examples.
# 6 CONCLUSION

In this paper, we proposed BasisGAN to model the multiple modes of conditional image generation in an intrinsic way. We formulated BasisGAN as a stochastic model that allows convolutional filters to be sampled from a filter space learned by a neural network instead of being deterministic. To significantly reduce the cost of sampling high-dimensional filters, we adopt parameter reduction using filter decomposition, and sample low-dimensional basis elements, as supported by the theoretical results presented here. Stochasticity is introduced by replacing deterministic convolutional layers with stochastic layers with basis generators. BasisGAN with basis generators achieves high-fidelity and high-diversity, state-of-the-art conditional image generation, without any auxiliary training objectives or regularizations. Extensive experiments with multiple underlying models demonstrate the effectiveness and extensibility of the proposed method.
# 7 ACKNOWLEDGMENTS

Work partially supported by ONR, ARO, NGA, NSF, and gifts from Google, Microsoft, and Amazon.
# REFERENCES

Luca Bertinetto, João F Henriques, Jack Valmadre, Philip Torr, and Andrea Vedaldi. Learning feed-forward one-shot learners. In Advances in Neural Information Processing Systems, pp. 523-531, 2016.

Charles Blundell, Julien Cornebise, Koray Kavukcuoglu, and Daan Wierstra. Weight uncertainty in neural network. In International Conference on Machine Learning, pp. 1613-1622, 2015.

Xi Chen, Yan Duan, Rein Houthooft, John Schulman, Ilya Sutskever, and Pieter Abbeel. InfoGAN: Interpretable representation learning by information maximizing generative adversarial nets. In Advances in Neural Information Processing Systems, pp. 2172-2180, 2016.

François Chollet. Xception: Deep learning with depthwise separable convolutions. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 1251-1258, 2017.

Misha Denil, Babak Shakibi, Laurent Dinh, Nando De Freitas, et al. Predicting parameters in deep learning. In Advances in Neural Information Processing Systems, pp. 2148-2156, 2013.

Arnab Ghosh, Viveka Kulharia, Vinay P Namboodiri, Philip HS Torr, and Puneet K Dokania. Multi-agent diverse generative adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 8513-8521, 2018.

Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pp. 2672-2680, 2014.

David Ha, Andrew Dai, and Quoc V Le. Hypernetworks. arXiv preprint arXiv:1609.09106, 2016.

Benjamin D Haeffele and René Vidal. Global optimality in tensor factorization, deep learning, and beyond. arXiv preprint arXiv:1506.07540, 2015.

Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. GANs trained by a two time-scale update rule converge to a local Nash equilibrium. In Advances in Neural Information Processing Systems, pp. 6626-6637, 2017.

Xun Huang, Ming-Yu Liu, Serge Belongie, and Jan Kautz. Multimodal unsupervised image-to-image translation. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 172-189, 2018.

Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125-1134, 2017.

Diederik P Kingma and Max Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.

David Krueger, Chin-Wei Huang, Riashat Islam, Ryan Turner, Alexandre Lacoste, and Aaron Courville. Bayesian hypernetworks. arXiv preprint arXiv:1710.04759, 2017.

Qi Mao, Hsin-Ying Lee, Hung-Yu Tseng, Siwei Ma, and Ming-Hsuan Yang. Mode seeking generative adversarial networks for diverse image synthesis. In IEEE Conference on Computer Vision and Pattern Recognition, 2019.

Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014.

Aaron van den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. arXiv preprint arXiv:1601.06759, 2016.

Deepak Pathak, Philipp Krahenbuhl, Jeff Donahue, Trevor Darrell, and Alexei A Efros. Context encoders: Feature learning by inpainting. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 2536-2544, 2016.

Siyuan Qiao, Chenxi Liu, Wei Shen, and Alan L Yuille. Few-shot image recognition by predicting parameters from activations. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 7229-7238, 2018.

Qiang Qiu, Xiuyuan Cheng, Robert Calderbank, and Guillermo Sapiro. DCFNet: Deep neural network with decomposed convolutional filters. International Conference on Machine Learning, 2018.

Patsorn Sangkloy, Jingwan Lu, Chen Fang, Fisher Yu, and James Hays. Scribbler: Controlling deep image synthesis with sketch and color. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 5400-5409, 2017.

Andrew M Saxe, Pang Wei Koh, Zhenghao Chen, Maneesh Bhand, Bipin Suresh, and Andrew Y Ng. On random weights and unsupervised feature learning. In International Conference on Machine Learning, volume 2, pp. 6, 2011.

Paul Smolensky. Information processing in dynamical systems: Foundations of harmony theory. Technical report, Colorado Univ at Boulder Dept of Computer Science, 1986.

Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. In Advances in Neural Information Processing Systems, pp. 3483-3491, 2015.

Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with PixelCNN decoders. In Advances in Neural Information Processing Systems, pp. 4790-4798, 2016.

Rene Vidal, Joan Bruna, Raja Giryes, and Stefano Soatto. Mathematics of deep learning. arXiv preprint arXiv:1712.04741, 2017.

Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional GANs. In IEEE Conference on Computer Vision and Pattern Recognition, 2018.

Xin Wang, Fisher Yu, Ruth Wang, Trevor Darrell, and Joseph E Gonzalez. TAFE-Net: Task-aware feature embeddings for low shot learning. arXiv preprint arXiv:1904.05967, 2019.

Wenqi Xian, Patsorn Sangkloy, Varun Agrawal, Amit Raj, Jingwan Lu, Chen Fang, Fisher Yu, and James Hays. TextureGAN: Controlling deep image synthesis with texture patches. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 8456-8465, 2018.

Dingdong Yang, Seunghoon Hong, Yunseok Jang, Tianchen Zhao, and Honglak Lee. Diversity-sensitive conditional generative adversarial networks. arXiv preprint arXiv:1901.09024, 2019.

Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In IEEE Conference on Computer Vision and Pattern Recognition, pp. 2223-2232, 2017a.

Jun-Yan Zhu, Richard Zhang, Deepak Pathak, Trevor Darrell, Alexei A Efros, Oliver Wang, and Eli Shechtman. Toward multimodal image-to-image translation. In Advances in Neural Information Processing Systems, pp. 465-476, 2017b.
# A PROOF OF EQUATION (3)

Proof. Given (2) in Section 3, the minimax game of adversarial training is expressed as:
$$
\begin{array}{l} \min_{G} \max_{D} V(D, G) = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), \mathbf{B} \sim p(\mathbf{B}|\mathbf{A})} \log D(\mathbf{A}, \mathbf{B}) + \\ \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A}), \mathbf{B} \sim q_{\phi,\theta}(\mathbf{B}|\mathbf{A})} \log[1 - D(\mathbf{A}, \mathbf{B})] \\ = \mathbb{E}_{\mathbf{A} \sim p(\mathbf{A})} \left\{ \mathbb{E}_{\mathbf{B} \sim p(\mathbf{B}|\mathbf{A})} \log D(\mathbf{A}, \mathbf{B}) + \mathbb{E}_{\mathbf{B} \sim q_{\phi,\theta}(\mathbf{B}|\mathbf{A})} \log[1 - D(\mathbf{A}, \mathbf{B})] \right\}. \tag{A.1} \end{array}
$$
By fixing $\mathbf{A}$ and considering only

$$
\begin{array}{l} V' = \mathbb{E}_{\mathbf{B} \sim p(\mathbf{B}|\mathbf{A})} \log D(\mathbf{A}, \mathbf{B}) + \mathbb{E}_{\mathbf{B} \sim q_{\phi,\theta}(\mathbf{B}|\mathbf{A})} \log[1 - D(\mathbf{A}, \mathbf{B})] \\ = \int_{\mathbf{B}} \big\{ p(\mathbf{B}|\mathbf{A}) \log D(\mathbf{A}, \mathbf{B}) + q_{\phi,\theta}(\mathbf{B}|\mathbf{A}) \log[1 - D(\mathbf{A}, \mathbf{B})] \big\} \, \mathrm{d}\mathbf{B}, \tag{A.2} \end{array}
$$
314
+ The optimal discriminator $D^{*}$ in (A.2) is achieved when
315
+
316
+ $$
317
+ D ^ {*} (\mathbf {A}, \mathbf {B}) = \frac {p (\mathbf {B} | \mathbf {A})}{p (\mathbf {B} | \mathbf {A}) + q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})}. \tag {A.3}
318
+ $$
319
+
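+ This follows from a pointwise argument (a standard step we make explicit here): for fixed $(\mathbf{A}, \mathbf{B})$, write $p = p(\mathbf{B}|\mathbf{A})$ and $q = q_{\phi,\theta}(\mathbf{B}|\mathbf{A})$; the integrand of (A.2), viewed as a function of $D \in (0, 1)$, is
+
+ $$
+ f(D) = p \log D + q \log (1 - D), \qquad f'(D) = \frac{p}{D} - \frac{q}{1 - D},
+ $$
+
+ which is concave and maximized where $f'(D) = 0$, i.e., at $D = \frac{p}{p + q}$.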
320
+ Given the optimal discriminator $D^{*}$ , (A.2) is expressed as:
321
+
322
+ $$
323
+ \begin{array}{l} V ^ {\prime} = \mathbb {E} _ {\mathbf {B} \sim p (\mathbf {B} | \mathbf {A})} \log D ^ {*} (\mathbf {A}, \mathbf {B}) + \mathbb {E} _ {\mathbf {B} \sim q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})} \log [ 1 - D ^ {*} (\mathbf {A}, \mathbf {B}) ] \\ = \mathbb {E} _ {\mathbf {B} \sim p (\mathbf {B} | \mathbf {A})} [ \log \frac {p (\mathbf {B} | \mathbf {A})}{p (\mathbf {B} | \mathbf {A}) + q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})} ] + \mathbb {E} _ {\mathbf {B} \sim q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})} [ \log \frac {q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})}{p (\mathbf {B} | \mathbf {A}) + q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})} ] \\ = - \log (4) + K L (p (\mathbf {B} | \mathbf {A}) | | \frac {p (\mathbf {B} | \mathbf {A}) + q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})}{2}) + K L (q _ {\phi , \theta} (\mathbf {B} | \mathbf {A}) | | \frac {p (\mathbf {B} | \mathbf {A}) + q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})}{2}) \\ = - \log (4) + 2 \cdot J S D (p (\mathbf {B} | \mathbf {A}) | | q _ {\phi , \theta} (\mathbf {B} | \mathbf {A})), \tag {A.4} \\ \end{array}
324
+ $$
325
+
326
+ where $KL$ is the Kullback-Leibler divergence. The minimum of $V^{\prime}$ is achieved iff the Jensen-Shannon divergence is 0, i.e., iff $p(\mathbf{B}|\mathbf{A}) = q_{\phi ,\theta}(\mathbf{B}|\mathbf{A})$ . Hence the global minimum of (A.1) is achieved when, for every sampled $\mathbf{A}$ , the generator perfectly replicates the conditional distribution $p(\mathbf{B}|\mathbf{A})$ .
327
+
328
+ # B PROOF OF THEOREM 4.1
329
+
330
+ Proof. We first consider the case when $\{a_k\}_{k=1}^K$ is a linearly independent set in the space $\mathcal{L}(\mathbb{R}^{C'}, \mathbb{R}^C)$ , which is finite dimensional (the space of $C'$ -by- $C$ matrices). That $\mathbf{F}^\omega(u)$ lies in the span of $\{a_k\}_k$ for any $\omega, u$ then means that there are unique coefficients $b(k; \omega, u)$ s.t.
331
+
332
+ $$
333
+ \mathbf {F} ^ {\omega} (u) = \sum_ {k = 1} ^ {K} b (k; \omega , u) a _ {k},
334
+ $$
335
+
336
+ and the vector $\{b(k;\omega ,u)\}_{k}\in \mathbb{R}^{K}$ can be determined from $\mathbf{F}^{\omega}(u)$ by a (deterministic) linear transform. Since each entry $\mathbf{F}(u,\lambda^{\prime},\lambda)$ is a random variable, i.e., a measurable function on $(\Omega ,\mathbb{P})$ , so is $b(k;\cdot ,u)$ viewed as a mapping from $\Omega$ to $\mathbb{R}$ , for each $k$ and $u$ , because a linear transform between finite-dimensional spaces preserves measurability. For the same reason, if $\mathbf{F}(u,\lambda^{\prime},\lambda)$ has a probability density, then so does each $b(k;\cdot ,u)$ . Letting $\{b(k;\cdot ,u)\}_{u\in [L]\times [L]}$ be the random vectors $\mathbf{b}_k$ proves the statement.
337
+
338
+ When $\{a_k\}_{k=1}^K$ are linearly dependent, the dimensionality of the subspace in which $\mathbf{F}^\omega(u)$ lies is $\tilde{K} < K$ . Suppose $\{\tilde{a}_k\}_{k=1}^{\tilde{K}}$ is a linearly independent set which spans the subspace, and $T: \mathbb{R}^{\tilde{K}} \to \mathbb{R}^K$ is the linear transform that maps the $\tilde{a}_k$ 's to the $a_k$ 's. Using the argument above, there exist random vectors $\tilde{b}_k$ s.t. $\mathbf{F} = \sum_{k=1}^{\tilde{K}} \tilde{b}_k \tilde{a}_k$ , and using the pseudo-inverse of $T$ to construct random vectors $\{b_k\}_{k=1}^K$ we have that $\mathbf{F} = \sum_{k=1}^K b_k a_k$ . This proves the existence of the $K$ random vectors $b_k$ .
339
+
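+ As a concrete illustration (ours, not from the paper), the coefficient recovery in the proof amounts to an ordinary least-squares solve; the sketch below uses toy dimensions and NumPy's `lstsq`, which applies the pseudo-inverse used in the argument above:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ C_prime, C, K = 8, 8, 7                                # toy dimensions
+ basis = rng.normal(size=(K, C_prime, C))               # the basis elements a_k
+ A = basis.reshape(K, -1).T                             # columns of A are vec(a_k)
+
+ b_true = rng.normal(size=K)                            # ground-truth coefficients
+ F = np.tensordot(b_true, basis, axes=1)                # F = sum_k b_k a_k
+
+ b, *_ = np.linalg.lstsq(A, F.reshape(-1), rcond=None)  # deterministic linear transform
+ print(np.allclose(b, b_true))                          # True
+ ```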
340
+ # C PARAMETER OPTIMIZATION IN FILTER GENERATION
341
+
342
+ The optimization of the parameters $\{\phi, \theta\}$ in filter generation is presented in Algorithm 1.
343
+
344
+ Algorithm 1 Optimization of the generator parameters $\{\phi, \theta\}$
+
+ for number of iterations do
+
+ - Sample a minibatch of $n$ pairs of samples $\left\{ (\mathbf{A}_{1}, \mathbf{B}_{1}), \cdots, (\mathbf{A}_{n}, \mathbf{B}_{n}) \right\}$ .
+ - Sample $z\sim \mathcal{N}(0,I)$ .
+ - Calculate the gradients w.r.t. the convolutional filters $\phi$ and $\mathbf{w}$ as in the standard setting:
+
+ $$
+ \Delta_ {\phi} = \frac {\partial \mathcal {L}}{\partial \phi}, \quad \Delta_ {\mathbf {w}} = \frac {\partial \mathcal {L}}{\partial \mathbf {w}},
+ $$
+
+ where $\mathcal{L} = \frac{1}{n}\sum_{i = 1}^{n}\log (1 - D(\mathbf{A}_i,G_{\phi ,\theta}(\mathbf{A}_i;T_{\theta}(z))))$ .
+
+ - Calculate the gradient w.r.t. $\theta$ in the filter generator: $\Delta_{\theta} = \Delta_{\mathbf{w}}\frac{\partial\mathbf{w}}{\partial\theta}$ .
+ - Update the parameters $\phi \gets \phi - \alpha \Delta_{\phi}$ and $\theta \gets \theta - \alpha \Delta_{\theta}$ , where $\alpha$ is the learning rate.
+
+ end for
361
+
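+ A minimal PyTorch sketch of one update of Algorithm 1 (our illustration, not the released code): a tiny filter generator `T` stands in for $T_{\theta}$, a single convolution with a $\tanh$ stands in for $G_{\phi,\theta}$, and a pooled sigmoid stands in for the discriminator $D$; autograd composes $\Delta_{\mathbf{w}}$ with $\partial\mathbf{w} / \partial\theta$ automatically.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ c_in, c_out, ksize, z_dim = 3, 16, 3, 64
+ T = torch.nn.Sequential(                       # filter generator T_theta
+     torch.nn.Linear(z_dim, 64), torch.nn.ReLU(),
+     torch.nn.Linear(64, c_out * c_in * ksize * ksize))
+ opt = torch.optim.SGD(T.parameters(), lr=1e-4)
+
+ A = torch.randn(4, c_in, 32, 32)               # a minibatch of conditioning inputs
+ z = torch.randn(z_dim)                         # z ~ N(0, I)
+ w = T(z).view(c_out, c_in, ksize, ksize)       # generated filters w = T_theta(z)
+ fake = torch.tanh(F.conv2d(A, w, padding=1))   # stand-in for G_{phi,theta}(A; w)
+
+ d_fake = torch.sigmoid(fake.mean(dim=(1, 2, 3)))  # toy discriminator score D(A, fake)
+ loss = torch.log(1.0 - d_fake + 1e-8).mean()      # the loss L from Algorithm 1
+ opt.zero_grad()
+ loss.backward()   # autograd composes Delta_w with dw/dtheta to give Delta_theta
+ opt.step()        # theta <- theta - alpha * Delta_theta
+ ```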
362
+ ![](images/4fb58754205dae15dd6157617b8277293b6acb81634f05639efdc68f55a30297.jpg)
363
+ (a) Quality/cost comparison.
364
+
365
+ ![](images/c520b0cd4ea26a51bf96179eb229b04338b0d6df35ce3b2c8511730dd9e25472.jpg)
366
+ ![](images/3d14d71eb0c0e7b4f9a928b3a62ad3bb97136f811ff145e752a37249115f9cdf.jpg)
+ (b) Generated images.
+
+ Figure A.1: (a) shows the comparison between basis generation and filter generation in terms of quality and cost. In (b), the top row shows images generated with basis generators (the red dot in (a)); the bottom row shows images generated with filter generators at the highest cost (the highest-cost point in (a)). Basis generation achieves better performance at significantly less cost compared to filter generation. The quality metrics are introduced in Section 5.
370
+
371
+ ![](images/724ab5fe3fe56cc34737a3decc5f3d46459c5edb11815eaaf37f589dedc06ea3.jpg)
372
+
373
+ ![](images/c3ee70b27a2500712d9cdbd65a423712c6a1b84926058353828e81dae6299d05.jpg)
374
+
375
+ ![](images/931ae5084e1b1f941acfe3f1fc5b5653cdc85b0f9180f2568566f68f65cfc4a5.jpg)
376
+
377
+ # D COMPUTATION COMPARISON
378
+
379
+ We present a thorough comparison in terms of generation quality and sampled filter size in Figure A.1, which clearly shows that filter generation is too costly to afford, while basis generation achieves a significantly better quality/cost trade-off, as indicated by the red dot in Figure A.1.
380
+
381
+ # E ABLATION STUDIES
382
+
383
+ In this section, we perform ablation studies on the proposed BasisGAN and evaluate multiple factors that can affect generation results. The studies use BasisGAN adapted from the Pix2Pix model on the maps $\rightarrow$ satellite dataset.
384
+
385
+ Size of basis generators. We model a basis generator using a small neural network, which consists of several hidden layers and takes as input a latent code sampled from a prior distribution. We consistently observe that a basis generator with a single hidden layer achieves the best performance while maintaining fast basis generation speed. Here we perform further experiments on the size of the intermediate layers and the input latent code, and the results are presented in Table A.1. We observe that the size of a basis generator does not significantly affect the final performance, and we use the $64 + 64$ setting in all the experiments for a good balance between performance and cost; a sketch of this setting follows below.
386
+
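+ For concreteness, a hypothetical basis generator in the $64 + 64$ setting (our sketch, not the released architecture): a 64-dimensional latent code passes through one 64-unit hidden layer and outputs per-filter coefficients that linearly combine $K$ learned basis filters, $\mathbf{F} = \sum_{k} b_{k} a_{k}$.
+
+ ```python
+ import torch
+
+ class BasisGenerator(torch.nn.Module):
+     def __init__(self, c_out, c_in, ksize=3, K=7, z_dim=64, hidden=64):
+         super().__init__()
+         self.basis = torch.nn.Parameter(torch.randn(K, c_in, ksize, ksize))  # the a_k's
+         self.mlp = torch.nn.Sequential(                 # single hidden layer
+             torch.nn.Linear(z_dim, hidden), torch.nn.ReLU(),
+             torch.nn.Linear(hidden, c_out * K))
+         self.c_out, self.K = c_out, K
+
+     def forward(self, z):
+         b = self.mlp(z).view(self.c_out, self.K)        # coefficients b_k per output filter
+         return torch.einsum('ok,kchw->ochw', b, self.basis)  # F = sum_k b_k a_k
+
+ gen = BasisGenerator(c_out=16, c_in=3)
+ filters = gen(torch.randn(64))                          # a fresh filter bank per sampled z
+ print(filters.shape)                                    # torch.Size([16, 3, 3, 3])
+ ```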
387
+ Number of basis elements $\mathbf{K}$ . Motivated by the empirically observed low rank of the generated filters, we use $K = 7$ in all the aforementioned experiments. We conduct further experiments with larger $K$ and show the results in Table A.2. Increasing $K$ clearly does not improve the quality of the generated images, and a large $K$ , e.g., $K = 128$ , even significantly degrades their diversity.
388
+
389
+ Table A.1: Quantitative results with different sizes of input latent code and intermediate layer. $m + n$ denotes the size of latent code and intermediate layer.
390
+
391
+ <table><tr><td>Dimensions</td><td>16 + 16</td><td>32 + 32</td><td>64 + 64</td><td>128 + 128</td><td>256 + 256</td><td>512 + 512</td></tr><tr><td>Diversity ↑</td><td>0.2242</td><td>0.2388</td><td>0.2417</td><td>0.2448</td><td>0.2452</td><td>0.2433</td></tr><tr><td>Fidelity ↓</td><td>40.16</td><td>37.41</td><td>35.54</td><td>34.36</td><td>33.70</td><td>32.31</td></tr></table>
392
+
393
+ Table A.2: Quantitative results with different numbers of basis elements $K$ .
394
+
395
+ <table><tr><td>K</td><td>7</td><td>16</td><td>32</td><td>64</td><td>128</td></tr><tr><td>Diversity ↑</td><td>0.2417</td><td>0.2409</td><td>0.2382</td><td>0.2288</td><td>0.2006</td></tr><tr><td>Fidelity ↓</td><td>35.54</td><td>36.08</td><td>35.17</td><td>34.97</td><td>36.49</td></tr></table>
396
+
397
+ # F QUALITATIVE RESULTS
398
+
399
+ # F.1 $\mathrm{PIX2PIX}\rightarrow \mathrm{BASISGAN}$
400
+
401
+ Additional qualitative results for Pix2Pix $\rightarrow$ BasisGAN are presented in Figure A.2. Qualitative comparisons against MSGAN (Mao et al., 2019) and DSGAN (Yang et al., 2019) are presented in Figure A.3. We directly use the official implementations and the pretrained models provided by the authors. For each example, the first 5 generated samples are presented without any selection. For the satellite $\rightarrow$ map comparison, we often observe missing correspondence in the samples generated by DSGAN. BasisGAN consistently provides samples with diverse details and strong correspondence to the input conditions.
402
+
403
+ # F.2 $\mathrm{PIX2PIXHD\to BASISGAN}$
404
+
405
+ Additional qualitative results for Pix2PixHD $\rightarrow$ BasisGAN are presented in Figure A.4.
406
+
407
+ # G SPEED, PARAMETER, AND MEMORY
408
+
409
+ We use PyTorch for the implementation of all the experiments. Training and testing are performed on a single NVIDIA 1080Ti graphics card with 11GB memory. Comparisons of testing speed, parameter count, and training memory are presented in Table A.3. The training memory is measured under the standard setting with a resolution of $256 \times 256$ for Pix2Pix and $1024 \times 512$ for Pix2PixHD. Since we use a small number of basis elements (typically 7) and tiny basis generators, the overall number of trainable parameters is reduced. Note that we only count the parameters of the generator networks, since we do not adopt any change to the discriminators.
410
+
411
+ Table A.3: Speed in testing, memory usage in training, and overall trainable parameter numbers.
412
+
413
+ <table><tr><td>Methods</td><td>Testing speed (s)</td><td>Training memory (MB)</td><td>Parameter number</td></tr><tr><td>Pix2Pix</td><td>0.01017</td><td>1,465</td><td>11,330,243</td></tr><tr><td>Pix2Pix → BasisGAN</td><td>0.01025</td><td>1,439</td><td>10,261,763</td></tr><tr><td>Pix2PixHD</td><td>0.0299</td><td>8,145</td><td>182,546,755</td></tr><tr><td>Pix2PixHD → BasisGAN</td><td>0.0324</td><td>8,137</td><td>154,378,051</td></tr></table>
414
+
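+ For reference, the quantities in Table A.3 can be measured with generic utilities like the following sketch (ours, not the paper's measurement script); `module` and `x` are placeholders for the actual generator network and a test batch:
+
+ ```python
+ import time
+ import torch
+
+ def count_parameters(module: torch.nn.Module) -> int:
+     """Number of trainable parameters, as reported in the last column."""
+     return sum(p.numel() for p in module.parameters() if p.requires_grad)
+
+ @torch.no_grad()
+ def time_forward(module: torch.nn.Module, x: torch.Tensor, reps: int = 50) -> float:
+     """Average seconds per forward pass (testing speed)."""
+     module.eval()
+     module(x)                        # warm-up pass
+     if x.is_cuda:
+         torch.cuda.synchronize()     # make sure queued GPU work has finished
+     start = time.perf_counter()
+     for _ in range(reps):
+         module(x)
+     if x.is_cuda:
+         torch.cuda.synchronize()
+     return (time.perf_counter() - start) / reps
+ ```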
415
+ ![](images/f0aaef4ce0efecddcc98de61a1ce8569145ec9d4edb1b07a4e1c7b148208d2e0.jpg)
416
+
417
+ ![](images/adcfeb3d15b852985396707892db8bdd2fe4b65e8819a587ce3eb14ed3413c3a.jpg)
418
+
419
+ ![](images/f122c4f91a4ad754824c672cfce0541f7b8fa822acd41ec1f3d5838212d36da5.jpg)
420
+
421
+ ![](images/c115ce61773909d88844c3dc9c9f256f52e4a036d7518f98aa40589bec17ba5f.jpg)
422
+
423
+ ![](images/b4061a25ba680b8cb88f25dcc36378c3174b14bf874f7c6e5f87b2bf7fa50180.jpg)
424
+
425
+ ![](images/d47e48225a094855707d621732d050d64d5ffa725c240cad89bcc16bfdddf3c1.jpg)
426
+
427
+ ![](images/f67f1e07ac662ac5c9c2d51a43413dfabac387300186bb40e6f0fb2649eaf196.jpg)
428
+
429
+ ![](images/b14362440cd48cd4ded79598395d1a6a70280578d39a799c318384d7db3f040f.jpg)
430
+
431
+ ![](images/7df8a7c40862957494ced96494c1c28edb55c37ef241814d8bb003ead02d4d77.jpg)
432
+
433
+ ![](images/a4e3c65eda5a73eca36d6d0fe5b8841dfd295b60a7e92220185497f2da6342a9.jpg)
434
+
435
+ ![](images/5cc26411f84c1279e998e54f15a51da36257603c14aa8fe1628720bb5cb2a274.jpg)
436
+ Input
437
+
438
+ ![](images/1dbd2077d299c89def2813eaf8ab624fdbd6ad8092da7c85e17a7cb6884d0cf6.jpg)
439
+
440
+ ![](images/d53c361b963efb454c805d7eaf8891aa74aadbc83e5326247174c6dc0449e430.jpg)
441
+
442
+ ![](images/8be18c3e9ec0e12eed8db769c25d904f5efff074a2627b4b747540819912addf.jpg)
443
+
444
+ ![](images/dc04e11e277af75de2e7aa695c2121accfed5bc2d37d5edd0269958e5c110815.jpg)
445
+
446
+ ![](images/01d6cd3f78e1f35960701ee029c7f558d6eae4d43c04174c05441ee17907c30e.jpg)
447
+
448
+ ![](images/ec85e406c70a6d28166f8e313536cd0665fde94071380f453a2e4c389d46aaaa.jpg)
449
+
450
+ ![](images/d36ef842344b9f3f74ffc587cc133aa642df9f43eca93a80c1d4deaa5fb7d971.jpg)
451
+
452
+ ![](images/20577419b2cb05a9bd376ce113613e6de25799294a47ce82d02aecb16ca3ecb0.jpg)
453
+
454
+ ![](images/b629e9a9b6aaf731d36ad870d8c95431d326674a0152cfacff75fbcf6db307b8.jpg)
455
+
456
+ ![](images/316cdf852369b011d95ee50bd045ede92638061e8cf70aa80159d8b4d7dafd50.jpg)
457
+
458
+ ![](images/49b3ab8369d0cef8cfe7197b5a8d0551f154f085b0c0d4e833a0b3c6e478fab0.jpg)
459
+ Ground truth
460
+
461
+ ![](images/5e8736f72066e8a5d911a26ef314d0cfb84adddcf8c32b017978eb08f7d3b501.jpg)
462
+
463
+ ![](images/708aa7a935c1b34a29b07321b26b5fdccc81f4f8ae254e84d74da8b710d0d53f.jpg)
464
+
465
+ ![](images/1b4b5a2b6e4063e05654ccb80150dec9834d421f902f7254c133800940d1be35.jpg)
466
+
467
+ ![](images/5a5960362579aaeb1b693af3ad420c48a761273cf19785cffe681b9493d31ea9.jpg)
468
+
469
+ ![](images/9ace9c32620dfc0ffceb33c241861106615174ac684a0b72475fbeca30d26421.jpg)
470
+
471
+ ![](images/cc592da21492ec68a0a1df780164897f8ba3bb461a5c177d78f25d5a38cf7034.jpg)
472
+
473
+ ![](images/5c7bc31df8af97fff8404fd1908dedf7ef5b67885670e1784a4f1ac53bc222af.jpg)
474
+
475
+ ![](images/cba6208d9b9c747514ad2cc35fa969cf7b9edf3ca544d604fbeedfc977197e07.jpg)
476
+
477
+ ![](images/0da649bd2c9bb683449b797075f0e46e5f63996161448266aea22c3a769f6bd4.jpg)
478
+
479
+ ![](images/0a6d59d438fa8ab8801a95ceed4946e25b332cf7191cc0721a019d5029dee3ed.jpg)
480
+
481
+ ![](images/5c3895dfa4c12e3fdaddb235c724f3f93553e0ace42445db254851c26c0ed378.jpg)
482
+ Figure A.2: Pix2Pix $\rightarrow$ BasisGAN.
483
+
484
+ ![](images/48fd3a337657e07ebeb4ccc40a8a53b011a7784dcaa2f5cb10401713f74af39c.jpg)
485
+
486
+ ![](images/a299edfbf144aa0d68b73cbd88d2a732e8f6aa95187c6837ca647aa2130add7c.jpg)
487
+
488
+ ![](images/bb467ccb1d4042bc58e80a7019958d6e252ab0faad6868874f4da6b43e32f8db.jpg)
489
+
490
+ ![](images/92cf723077bf835ad1cca6dfdd460e2c0853c5ecf79b6b8a2e2cdff6e7bc9869.jpg)
491
+
492
+ ![](images/9d3ca4ca481cf75ca3f5876e86adc7be839812d6320b3746f4a72fa06207ccad.jpg)
493
+
494
+ ![](images/ec3fea340bc980fa4bfa976786db2b0816c134c694d8d8fffa2dafe44ebbf27a.jpg)
495
+
496
+ ![](images/5889a78b817e0f634d2d6fde2824d147fb7b8c38e4712098c9d47aab94e373e4.jpg)
497
+
498
+ ![](images/4bc6654fa21e8b8c9d8e73ac32ec4cfe3fe2dcebd932ad4560dceb0814ce658d.jpg)
499
+
500
+ ![](images/f7f1a34764a461dd129a5a432b14518c2074c95599c0ed79d75d631204b1ee50.jpg)
501
+
502
+ ![](images/cd9bb419483935c987696574df0118f3723644470339b863c0db30f3533eca55.jpg)
503
+
504
+ ![](images/3f85d9e41f9a476cbfc60d3a36e072efbb277b960e75744c65ce56bfb3c3b864.jpg)
505
+ Generated diverse samples
506
+
507
+ ![](images/6b7613653bd720c66366d80903e50ad4b21fece6eabc903714392175a78ce99b.jpg)
508
+
509
+ ![](images/cc6665226bde9b31d3bf8332eb2f5fc22c88e50147d912e9c22f606c8f8a20dd.jpg)
510
+
511
+ ![](images/fd3e05feff9a69798135cfbe9e49f125f7a6efd64408158a07b572b5772d87d4.jpg)
512
+
513
+ ![](images/c686af85aa28eb7028a1d62fe55d946ffdba1b88724ba44459f76c4f46fa08ce.jpg)
514
+
515
+ ![](images/1a20c1ada269ca384b165fe6a1366085e1ff5a911d4d069dd089775162db5a8b.jpg)
516
+
517
+ ![](images/a5ad31873feceb7d4c94103c8e79f6a5c8f687933976e1ac78fcdcbd25aa35a5.jpg)
518
+
519
+ ![](images/fe5e6dd2b5d45f395004dd99f1bf3c9d3ece45ba00f368112049192919f2ca40.jpg)
520
+
521
+ ![](images/a1eb23e5e8a79440ce51db47ebb3b687a4f58e6a8a97a1c073267beb0328e6b8.jpg)
522
+
523
+ ![](images/4e781c3b3b17afcb3a5f687238efe41e88b410f20208fb7322fe40ccf76cacda.jpg)
524
+
525
+ ![](images/9f3346da68b4f9779dd89a882e86e2f359fc3f63be9f465764d8275a23f0f1d7.jpg)
526
+
527
+ ![](images/10806b02bc58320541474033f2d02ae2fc8ddf3e482df2eff7602f2469274d13.jpg)
528
+
529
+ ![](images/3142e53abe2eda0dce4849f59c72a1183df38de93265517b87535a4c850c9714.jpg)
530
+
531
+ ![](images/3ae3e986c069d92bcf1a679212d865a0197a3fc522eb07b8e1fb43d92ba89f13.jpg)
532
+
533
+ ![](images/d10d595e292b9afbd8b9bf0e4478fe63cc5eb95aa8c3a4c7db1602327ddd564f.jpg)
534
+
535
+ ![](images/c301e7322423eb0b2b9322719f0f1ab914193169a13ed6903a302389ab8f06ac.jpg)
536
+
537
+ ![](images/92aade789656d59e6590dee2d763215f0a9a7e42e321ef6d67ac0f18359331e1.jpg)
538
+
539
+ ![](images/8af178c875666c9698c4c44d8686c369cc810040e4365395e9215f40099538bd.jpg)
540
+
541
+ ![](images/43429159ce7796553e2ced1e1e3e00e087fabb8cef807a8a95ced8081db56132.jpg)
542
+
543
+ ![](images/f41b038c0533e08ba204833e159621e4b163d92f72c46d55962deba8eb8c6061.jpg)
544
+
545
+ ![](images/41e4a33e4695fac8c4220483ba145b44dca3ee2f7b485e2342da8e7a814d419b.jpg)
546
+
547
+ ![](images/cb4190e3737b343a9ed5ae53afd479a2406821f8655877002cea1937070ff596.jpg)
548
+
549
+ ![](images/7df8fd06c6226f4917f5394bf3193fa36b2f2743e8d38d082c5141b0d8340864.jpg)
550
+
551
+ ![](images/46fcc90af14cf68b4f8e52daeea0ffa48cec3d4f55d2fda67876ec877472263e.jpg)
552
+ Figure A.3: Qualitative comparisons with MSGAN and DSGAN. Please zoom in for details.
553
+
554
+ ![](images/31af10d50d720ee6a3ad8c447c76fa95747c2c0e7c0fe0b71ea244291a0a6998.jpg)
555
+
556
+ ![](images/35108318e3df7a0ecbdedc0e24854edaf2cfa15be8db0b7ce696e284f35f7d17.jpg)
557
+
558
+ ![](images/5e46200280aae11bb5586f0d65b6aa78bcf57ff77196532f20662229b462f6eb.jpg)
559
+
560
+ ![](images/26c5ed699dac1c4d238726763bac84c2dd105e5424559e05cb566b957ac33155.jpg)
561
+
562
+ ![](images/6732992b222c52a5b8d056eefefc86c24f1117e64a7f286ab33029d16e07bd37.jpg)
563
+
564
+ ![](images/5f08484694672d67920ca49d6281bc7ca9c4165d3835b6ad1d442e3c6147f088.jpg)
565
+ Input condition
566
+
567
+ ![](images/25fc3bacafda9df2040fa92825e9a220663d941b80d7d5387914bb36221f764a.jpg)
568
+
569
+ ![](images/fd2a84a0aaff5364bbd07bc0573c5e34464a94a1f28c36f9daea6db31e6acc2e.jpg)
570
+
571
+ ![](images/aff86de23945ce7901cffda31bca68d36b7e4b2e492dde98bb49fe46f5181c03.jpg)
572
+
573
+ ![](images/e5a5447d1cd26b5232bbe756fb21095d5e086cfcb5011a465ef690f8d75b6811.jpg)
574
+
575
+ ![](images/8240473b2749389861cb84fbfa02faf45fa122588806c55a178e25ede9f989c4.jpg)
576
+
577
+ ![](images/aed642af8bd9cad039deebaaab0d5d5327577042d6d97b9f462c16d1c403e5fd.jpg)
578
+ Generated diverse samples
579
+
580
+ ![](images/f9de1066e90a93edf9e4467cdf1cd9acde515f54d5e8a90fea7c9d5ef6592804.jpg)
581
+
582
+ ![](images/e9a13a1de34232914659aa335a59cb0f04be2936840d69c0cbf3186db9e00bc3.jpg)
583
+
584
+ ![](images/ff3e8df60aac00cf86881a9c8cdfac2e42c9ada522bc11e2cfabfe0c234d00b6.jpg)
585
+
586
+ ![](images/42d1a9a8f3733cc7350a9ea14610653c99e37280db08b48cc4acadf1daff5b03.jpg)
587
+
588
+ ![](images/1ea0c7d26452d1dae97883683aa4b14ee7b30bf29e9c90c8a02865ec1678dc17.jpg)
589
+
590
+ ![](images/675a850bd427e868dadcca995878a9ff01ab21625cdc4b1e0485a424906bfe11.jpg)
591
+ Figure A.4: Pix2PixHD $\rightarrow$ BasisGAN.
stochasticconditionalgenerativenetworkswithbasisdecomposition/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b327ffbc95e6245e8c430b892843221ca7fb8e6e26297c1aaf286b3ee3e7fd5
3
+ size 1571868
stochasticconditionalgenerativenetworkswithbasisdecomposition/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f0cd181fe5038b6e7c9f4a9f179af064ae99a2bb07f91fff03ed76565e800da
3
+ size 741445
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1613ff7d46127f3a490495770d80a0ee9ed42ef756a8088f0d54d09ddd4e1b3
3
+ size 65272
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cff8114123b9db9f7304387fae16e64bcc4bcd7dac7416e8b7942560ee4804aa
3
+ size 79419
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/5c9254ab-6ea1-4d34-b7cd-bb0d48c2d31c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d69762d96cbadb94eacef7b41f027eca70592b7abdc0de189417f9505220344e
3
+ size 1265739
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/full.md ADDED
@@ -0,0 +1,265 @@
 
 
 
 
1
+ # STOCHASTIC WEIGHT AVERAGING IN PARALLEL: LARGE-BATCH TRAINING THAT GENERALIZES WELL
2
+
3
+ Vipul Gupta*†
4
+
5
+ vipul_gupta@berkeley.edu
6
+
7
+ Department of EECS, UC Berkeley
8
+
9
+ Santiago Akle Serrano *
10
+
11
+ sakle@apple.com
12
+
13
+ Apple Inc.
14
+
15
+ Dennis DeCoste
16
+
17
+ ddecoste@apple.com
18
+
19
+ Apple Inc.
20
+
21
+ # ABSTRACT
22
+
23
+ We propose Stochastic Weight Averaging in Parallel (SWAP), an algorithm to accelerate DNN training. Our algorithm uses large mini-batches to compute an approximate solution quickly and then refines it by averaging the weights of multiple models computed independently and in parallel. The resulting models generalize as well as those trained with small mini-batches but are produced in a substantially shorter time. We demonstrate the reduction in training time and the good generalization performance of the resulting models on the computer vision datasets CIFAR10, CIFAR100, and ImageNet.
24
+
25
+ # 1 INTRODUCTION
26
+
27
+ Stochastic gradient descent (SGD) and its variants are the de-facto methods to train deep neural networks (DNNs). Each iteration of SGD computes an estimate of the objective's gradient by sampling a mini-batch of the available training data and computing the gradient of the loss restricted to the sampled data. A popular strategy to accelerate DNN training is to increase the mini-batch size together with the available computational resources. Larger mini-batches produce more precise gradient estimates; these allow for higher learning rates and achieve larger reductions of the training loss per iteration. In a distributed setting, multiple nodes can compute gradient estimates simultaneously on disjoint subsets of the mini-batch and produce a consensus estimate by averaging all estimates, with one synchronization event per iteration. Training with larger mini-batches requires fewer updates, thus fewer synchronization events, yielding good overall scaling behavior.
28
+
29
+ Even though the training loss can be reduced more efficiently, there is a maximum batch size after which the resulting model tends to have worse generalization performance (McCandlish et al., 2018; Keskar et al., 2016; Hoffer et al., 2017; Golmant et al., 2018; Shallue et al., 2018). This phenomenon forces practitioners to use batch sizes below those that achieve the maximum throughput and limits the usefulness of large-batch training strategies.
30
+
31
+ Stochastic Weight Averaging (SWA) (Izmailov et al., 2018) is a method that produces models with good generalization performance by averaging the weights of a set of models sampled from the final stages of a training run. As long as the models all lie in a region where the population loss is mostly convex, the average model can behave well, and in practice, it does.
32
+
33
+ We have observed that if instead of sampling multiple models from a sequence generated by SGD, we generate multiple independent SGD sequences and average models from each, the resulting model achieves similar generalization performance. Furthermore, if all the independent sequences use small-batches, but start from a model trained with large-batches, the resulting model achieves generalization performance comparable with a model trained solely with small-batches. Using these observations, we derive Stochastic Weight Averaging in Parallel (SWAP): A simple strategy to accelerate DNN training by better utilizing available compute resources. Our algorithm is simple to implement, fast and produces good results with minor tuning.
34
+
35
+ For several image classification tasks on popular computer vision datasets (CIFAR10, CIFAR100, and ImageNet), we show that SWAP achieves generalization performance comparable to models trained with small-batches but does so in time similar to that of a training run with large-batches. We use SWAP on some of the most efficient publicly available models to date, and show that it is able to substantially reduce their training times. Furthermore, we are able to beat the state of the art for CIFAR10 and train in $68\%$ of the time of the winning entry of the DAWNBench competition. $^{1}$
38
+
39
+ # 2 RELATED WORK
40
+
41
+ The mechanism by which the training batch size affects the generalization performance is still unknown. A popular explanation is that, because of the reduced noise, a model trained using larger mini-batches is more likely to get stuck in sharper minima. In (Keskar et al., 2016), the authors argue that sharp minima are sensitive to variations in the data because slight shifts in the location of the minimizer will result in large increases in average loss value. However, if flatness is taken to be the curvature as measured by the second-order approximation of the loss, then counterexamples exist. In (Dinh et al., 2017), the authors transform a flat minimizer into a sharp one without changing the behavior of the model, and in (Li et al., 2018), the authors show the reverse behavior when weight-decay is not used.
42
+
43
+ In (McCandlish et al., 2018), the authors predict that the batch size can be increased up to a critical size without any drop in accuracy and empirically validate this claim. For example, the accuracy begins to drop for image classification on CIFAR10 when the batch sizes exceed 1k samples. They postulate that when the batch size is large, the mini-batch gradient is close to the full gradient, and further increasing the batch size will not significantly improve the signal to noise ratio.
44
+
45
+ In (Hoffer et al., 2017), the authors argue that, for a fixed number of epochs, using a larger batch size implies fewer model updates. They argue that changing the number of updates impacts the distance the weights travel away from their initialization and that this distance determines the generalization performance. They show that by training with large-batches for longer times (thus increasing the number of updates), the generalization performance of the model is recovered. Even though this large-batch strategy generates models that generalize well, it does so in more time than the small-batch alternative.
46
+
47
+ Irrespective of the generalization performance, the batch size also affects the optimization process. In (Ma et al., 2017), the authors show that for convex functions in the over-parameterized setting, there is a critical batch size below which an iteration with a batch size of $M$ is roughly equivalent to $M$ iterations with a batch size of one, and batch-sizes larger than $M$ do not improve the rate of convergence.
48
+
49
+ Methods which use adaptive batch sizes exist (Devarakonda et al., 2017; Goyal et al., 2017; Jia et al., 2018; Smith et al., 2017; You et al., 2017). However, most of these methods are either designed for specific datasets or require extensive hyper-parameter tuning. Furthermore, they use the computational resources inefficiently by reducing the batch size during part of the training.
50
+
51
+ Local SGD (Zhang et al., 2016; Stich, 2018; Li et al., 2019; Yu et al., 2019) is a distributed optimization algorithm that trades off gradient precision with communication costs by allowing workers to independently update their models for a few steps before synchronizing. Post-local SGD (Lin et al., 2018) is a variant which refines the output of large-batch training with local SGD. The authors have observed that the resulting model has better generalization than the model trained with large-batches and that their scheme achieves significant speedups. In this manner, Post-local SGD is in a very similar vein to the present work. However, while Post-local SGD lets the models diverge for $T$ iterations where $T$ is in the order of tens, SWAP averages the models once after multiple epochs. For example, in our ImageNet experiments (see Sec. 5) we average our models after tens of thousands of updates, while Post-local SGD does so after at most 32. Because of this difference, we believe that the mechanisms that power the success of SWAP and Post-local SGD must be different and point to different phenomena in DNN optimization.
52
+
53
+ Stochastic weight averaging (SWA) (Izmailov et al., 2018) is a method where models are sampled from the later stages of an SGD training run. When the weights of these models are averaged, they result in a model with much better generalization properties. This strategy is very effective and has been adopted in multiple domains: deep reinforcement learning (Nikishin et al.), semi-supervised learning (Athiwaratkun et al., 2019), Bayesian inference (Maddox et al., 2019), low-precision training (Yang et al., 2019). In this work, we adapt SWA to accelerate DNN training.
54
+
55
+ # 3 STOCHASTIC WEIGHT AVERAGING IN PARALLEL
56
+
57
+ We describe SWAP as an algorithm in three phases (see Algorithm 1): In the first phase, all workers train a single model by computing large mini-batch updates. Synchronization between workers is required at each iteration and a higher learning rate is used. In the second phase, each worker independently refines its copy of the model to produce a different set of weights. Workers use a smaller batch size, a lower learning rate, and different randomizations of the data. No synchronization between workers is required in this phase. The last phase consists of averaging the weights of the resulting models and computing new batch-normalization statistics to produce the final output.
58
+
59
+ Phase 1 is terminated before the training loss reaches zero or the training accuracy reaches $100\%$ (for example, a few percentage points below $100\%$ ). We believe that stopping early precludes the optimization from getting stuck at a location where the gradients are too small and allows the following stage to improve the generalization performance. However, the optimal stopping accuracy is a hyper-parameter that requires tuning.
60
+
61
+ During phase 2, the batch size is appropriately reduced and small-batch training is performed independently and simultaneously. Here, each worker (or a subset of them) performs training using all the data, but sampling in different random order. Thus, after the end of the training process, each worker (or subset) will have produced a different model.
62
+
63
+ Figure 1 plots the accuracies and learning-rate schedules for a run of SWAP. During the large-batch phase (phase 1), all workers share a common model and have the same generalization performance. During the small-batch phase (phase 2) the learning rates for all the workers are the same but their testing accuracies differ as the stochasticity causes the models to diverge from each other. We also plot the test-accuracy of the averaged model that would result were we to stop phase 2 at that point. Note that the averaged model performs consistently better than each individual model.
64
+
65
+ ![](images/a6884e864bfe0333ce040fa809354fab54ac897ce0415f66902aad402ec39418.jpg)
66
+ Figure 1: Learning rate schedules and CIFAR10 test accuracies for workers participating in SWAP. The large-batch phase with synchronized models is followed by the small-batch phase with diverging independent models. The test accuracy of the averaged weight model is computed by averaging the independent models and computing the test loss for the resulting model.
67
+
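+ The three phases can be summarized in a condensed, self-contained toy sketch (our paraphrase of Algorithm 1, not the authors' code); a small linear regression stands in for the DNN, and the batch-norm recomputation of phase 3 is omitted because the toy model has no batch-norm layers:
+
+ ```python
+ import copy
+ import torch
+
+ torch.manual_seed(0)
+ X, y = torch.randn(4096, 16), torch.randn(4096, 1)   # synthetic dataset
+ model = torch.nn.Linear(16, 1)                       # toy stand-in for the DNN
+
+ def sgd_epoch(net, batch_size, lr):
+     opt = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
+     perm = torch.randperm(len(X))
+     for i in range(0, len(X), batch_size):
+         idx = perm[i:i + batch_size]
+         opt.zero_grad()
+         torch.nn.functional.mse_loss(net(X[idx]), y[idx]).backward()
+         opt.step()
+
+ # Phase 1: large batches, higher learning rate (one synchronized model).
+ for _ in range(20):
+     sgd_epoch(model, batch_size=1024, lr=0.1)
+
+ # Phase 2: W independent small-batch refinements (run in parallel in practice).
+ W = 4
+ replicas = [copy.deepcopy(model) for _ in range(W)]
+ for replica in replicas:
+     for _ in range(5):
+         sgd_epoch(replica, batch_size=64, lr=0.01)
+
+ # Phase 3: average the replica weights to produce the final model.
+ avg_state = {k: sum(r.state_dict()[k] for r in replicas) / W
+              for k in model.state_dict()}
+ model.load_state_dict(avg_state)
+ print(torch.nn.functional.mse_loss(model(X), y).item())
+ ```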
68
+ # 4 LOSS LANDSCAPE VISUALIZATION AROUND SWAP ITERATES
69
+
70
+ To visualize the mechanism behind SWAP, we plot the error achieved by our test network on a plane that contains the outputs of the three different phases of the algorithm, denoted $\theta_{1}, \theta_{2}, \theta_{3}$ . Inspired by (Garipov et al., 2018) and (Izmailov et al., 2018), we pick orthogonal vectors $u, v$ that span the plane which contains $\theta_{1}, \theta_{2}, \theta_{3}$ . We plot the loss value generated by model $\theta = \theta_{1} + \alpha u + \beta v$ at the location $(\alpha, \beta)$ . To plot a loss value, we first generate a weight vector $\theta$ , compute the batch-norm statistics for that model (through one pass over the training data), and then evaluate the test and train accuracies.
71
+
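+ A minimal sketch of this plane-evaluation procedure (ours, not the authors' code), assuming the three models are available as flattened 1-D NumPy arrays and a hypothetical helper `evaluate(theta)` that loads the weights, recomputes the batch-norm statistics, and returns the error:
+
+ ```python
+ import numpy as np
+
+ def plane_basis(theta1, theta2, theta3):
+     """Orthonormal u, v spanning the plane through three flattened weight vectors."""
+     u = theta2 - theta1
+     u = u / np.linalg.norm(u)
+     v = theta3 - theta1
+     v = v - np.dot(u, v) * u          # Gram-Schmidt: remove the component along u
+     v = v / np.linalg.norm(v)
+     return u, v
+
+ def error_surface(theta1, u, v, evaluate, alphas, betas):
+     """Entry (i, j) is the error of the model theta1 + alphas[i]*u + betas[j]*v."""
+     return np.array([[evaluate(theta1 + a * u + b * v) for b in betas]
+                      for a in alphas])
+ ```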
72
+ Algorithm 1: Stochastic Weight Averaging in Parallel (SWAP)
+ 1 Number of workers $W$; weight initialization $\theta_0$; $t = 0$
+ 2 Training accuracy, $\tau$, at which to exit phase one
+ 3 Learning rate schedules $LR_{1}$ and $LR_{2}$ for phases one and two, respectively
+ 4 Mini-batch sizes $B_{1}$ and $B_{2}$ for phases one and two, respectively
+ 5 Gradient of the loss function for sample $i$ at weights $\theta$: $g^{i}$
+ 6 SGDUpdate( $\cdot$ ): a function that updates the weights using SGD with momentum and weight decay
+ 7 Phase 1:
+ 8 while training accuracy $\leq \tau$ do
+ 9 $\quad \eta_t \gets LR_{1}(t)$
+ 10 $\quad$ for $w$ in $[0, \dots, W - 1]$ in parallel do
+ 11 $\qquad B^{w} \gets$ random sub-sample of the training data of size $\frac{B_{1}}{W}$
+ 12 $\qquad g^{w} \gets \frac{W}{|B_{1}|} \sum_{i \in B^{w}} g^{i}$ ; /* worker gradient */
+ 13 $\quad$ end
+ 14 $\quad g_{t} \gets \frac{1}{W} \sum_{w} g^{w}$ ; /* synchronization of worker gradients */
+ 15 $\quad \theta_{t+1} \gets \theta_{t} + \text{SGDUpdate}(\eta_{t}, g_{t}, g_{t-1}, \dots)$ ; /* first-order method update */
+ 16 $\quad t \gets t + 1$
+ 17 end ; $T \gets t$
+ 18 Phase 2:
+ 19 for $t$ in $[T, \dots, T + Q]$ do
+ 20 $\quad \eta_t \gets LR_{2}(t - T)$
+ 21 $\quad$ for $w$ in $[0, \dots, W - 1]$ in parallel do
+ 22 $\qquad B^{w} \gets$ random sub-sample of the training data of size $B_{2}$
+ 23 $\qquad g_t^{w} \gets \frac{1}{|B_{2}|} \sum_{i \in B^{w}} g^{i}$ ; /* worker gradient */
+ 24 $\qquad \theta_{t+1}^{w} \gets \theta_{t}^{w} + \text{SGDUpdate}(\eta_{t}, g_{t}^{w}, g_{t-1}^{w}, \dots)$ ; /* update at local worker */
+ 25 $\quad$ end
+ 26 end ; /* this yields $W$ different models */
+ 27 Phase 3: $\hat{\theta}_{\ell} \gets \frac{1}{W} \sum_{w} \theta_{T+Q}^{w}$ ; /* produce averaged model */
+ 28 Compute batch-norm statistics for $\hat{\theta}_{\ell}$ to produce $\theta_{\ell}$
+ Result: Final model $\theta_{\ell}$
+
+ In Figure 2, we plot the training and testing error for the CIFAR10 dataset. Here 'LB' marks the output of phase one, 'SGD' the output of a single worker after phase two, and 'SWAP' the final model. Color codes correspond to error measures at the points interpolated on the plane. In Figure 2a, we observe that the level-sets of the training error (restricted to this plane) form an almost convex basin and that both the output of phase 1 ('LB') $^2$ and the output of one of the workers of phase 2 ('SGD') lie in the outer edges of the basin. Importantly, during phase 2 the model traversed to a different side of the basin (and not to the center). Also, the final model ('SWAP') is closer to the center of the basin.
105
+
106
+ When we visualize these three points on the test loss landscape (Figure 2b), we observe that the variations in the topology of the basin cause the 'LB' and 'SGD' points to fall in regions of higher error. But, since the 'SWAP' point is closer to the center of the basin, it is less affected by the change in topology. In Figure 3, we neglect the 'LB' point and plot the plane spanned by three workers 'SGD1', 'SGD2', 'SGD3'. In Figure 3a, we can observe that these points lie at different sides of the training error basin while 'SWAP' is closer to the center. In Figure 3b, we observe that the change in topology causes the worker points to lie in regions of higher testing errors than 'SWAP', which is again close to the center of both basins. For reference, we have also plotted the best model that can be generated by this region of the plane.
107
+
108
+ ![](images/524e8971b1da32b7b1f14235044e66ede66388492063027047f08a6a41a56727.jpg)
+ (a) Train Error $(\%)$
+
+ ![](images/2e4170ee94dc50ff181b1eeea1693dc8fec6d315e1851d7e8a4d47798e0d765a.jpg)
+ (b) Test Error $(\%)$
+ Figure 2: CIFAR10 train and test error restricted to a 2D plane spanned by the output of phase 1 ('LB'), one of the outputs of phase 2 ('SGD'), and the averaged model ('SWAP').
+
+ ![](images/4139744704de45f97aa1f838aabaeae92f71e7124db98082e13a32513a7b09ea.jpg)
+ (a) Train Error $(\%)$
+
+ ![](images/a6b41008b176dbd72ce23d8a9b9af8bed5dd87b3f81559a267ce2ebc2a5f4867.jpg)
+ (b) Test Error $(\%)$
+ Figure 3: CIFAR10 train and test error restricted to a 2D plane spanned by the output of three workers after phase 2 ('SGD1', 'SGD2', 'SGD3') and the location of the average model ('SWAP'). The minimum test error achievable for models restricted to this region of the plane is marked as BEST.
121
+
122
+ # 4.1 SAMPLING FROM INDEPENDENT RUNS OF SGD OR SAMPLING FROM ONE
123
+
124
+ In (Mandt et al., 2017), the authors argue that in the later stages of SGD the weight iterates behave similarly to an Ornstein-Uhlenbeck process. So, by maintaining a constant learning rate, the SGD iterates should reach a stationary distribution that is similar to a high-dimensional Gaussian. This distribution is centered at the local minimum, has a covariance that grows proportionally with the learning rate and inversely proportionally to the batch size, and has a shape that depends on both the Hessian of the mean loss and the covariance of the gradient.
125
+
126
+ The authors of (Izmailov et al., 2018) argue that, by virtue of being a high-dimensional Gaussian, all the mass of the distribution is concentrated near the 'shell' of the ellipsoid, and therefore it is unlikely for SGD to access the interior. They further argue that sampling weights from an SGD run (leaving enough time steps between samples) will choose weights that are spread out on the surface of this ellipsoid, so their average will be closer to the center.
127
+
128
+ Without any further assumptions, we can justify sampling from different SGD runs (as done in phase 2 during SWAP). As long as all runs start in the same basin of attraction, and provided the model from (Mandt et al., 2017) holds, all runs will converge to the same stationary distribution, and each run can generate independent samples from it.
129
+
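+ A toy Monte Carlo check of this high-dimensional intuition (our illustration, not from the paper): i.i.d. Gaussian samples concentrate on a shell of radius $\approx \sigma\sqrt{d}$ around the mean, while their average lands much closer to the center, regardless of whether the samples come from one long run or from several independent runs with the same stationary distribution.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ d, sigma, n_samples = 10_000, 0.1, 8        # dimension, noise scale, number of models
+ samples = rng.normal(0.0, sigma, size=(n_samples, d))  # "models" around the minimum at 0
+
+ print(np.linalg.norm(samples, axis=1).mean())  # ~ sigma*sqrt(d) = 10: each model sits on the shell
+ print(np.linalg.norm(samples.mean(axis=0)))    # ~ sigma*sqrt(d/n) ~= 3.5: the average is much closer
+ ```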
130
+ # 4.2 ORTHOGONALITY OF THE GRADIENT AND THE DIRECTION TO THE CENTER OF BASIN
131
+
132
+ To gain some intuition about the advantage that SWA and SWAP have over SGD, we measure the cosine similarity between the gradient descent direction, $-g_{i}$ , and the direction towards the output of SWAP, $\Delta \theta = \theta_{\mathrm{swap}} - \theta_{i}$ . In Figure 4, we see that the cosine similarity, $\frac{\langle\Delta\theta, - g_i\rangle}{\|g_i\|\|\Delta\theta\|}$ , decreases as the training enters its later stages. We believe that towards the end of training, the angle between the gradient direction and the direction toward the center of the basin is large; therefore the process moves mostly orthogonally to the basin, and progress slows. However, averaging samples from different sides of the basin can (and does) make faster progress towards the center.
133
+
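+ A minimal sketch of this measurement (ours), assuming the current gradient and the two weight vectors are available as flattened 1-D tensors:
+
+ ```python
+ import torch
+
+ def cosine_to_swap(grad, theta_i, theta_swap):
+     """Cosine similarity between -g_i and the direction to the SWAP solution."""
+     delta = theta_swap - theta_i               # direction towards the averaged model
+     return torch.nn.functional.cosine_similarity(-grad, delta, dim=0).item()
+ ```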
134
+ ![](images/26740bb2f968747bbbed20ddbd94999dba1252af19a32f4180608b465ec31469.jpg)
135
+ Figure 4: Cosine similarity between direction of gradient descent and $\Delta \theta$
136
+
137
+ # 5 EXPERIMENTS
138
+
139
+ In this section we evaluate the performance of SWAP for image classification tasks on the CIFAR10, CIFAR100, and ImageNet datasets.
140
+
141
+ # 5.1 CIFAR10 AND CIFAR100
142
+
143
+ For the experiments in this subsection, we found the best hyper-parameters using grid searches (see Appendix A for details). We train using mini-batch SGD with Nesterov momentum (set to 0.9) and weight decay of $5 \times 10^{-4}$ . We augment the data using cutout (DeVries & Taylor, 2017) and use a fast-to-train custom ResNet 9 from a submission to the DAWNbench leaderboard (Coleman et al.). All experiments were run on one machine with 8 NVIDIA Tesla V100 GPUs and use Horovod (Sergeev & Del Balso, 2018) to distribute the computation. All statistics were collected over 10 different runs.
144
+
145
+ CIFAR10: For these experiments, we used the following settings—SWAP phase one: 4096 samples per batch using 8 GPUs (512 samples per GPU). Phase one is terminated when the training accuracy reaches $98\%$ (on average 108 epochs). SWAP phase two: 8 workers with one GPU each and 512 samples per batch for 30 epochs. The experiment that uses only large-batches had 4096 samples per batch across 8 GPUs and was run for 150 epochs. The experiments that use only small-batches had 512 samples per batch on 2 GPUs and were trained for 100 epochs.
146
+
147
+ Table 1 compares the best test accuracies and corresponding training times for models trained with small-batch only, with large-batch only, and with SWAP. We report the average accuracy of the workers before averaging and the accuracy of the final model.
148
+
149
+ <table><tr><td>CIFAR10</td><td>Test Accuracy (%)</td><td>Training Time (sec)</td></tr><tr><td>SGD (small-batch)</td><td>95.24 ± 0.09</td><td>254.12 ± 0.62</td></tr><tr><td>SGD (large-batch)</td><td>94.77 ± 0.23</td><td>132.62 ± 1.09</td></tr><tr><td>SWAP (before averaging)</td><td>94.70 ± 0.20</td><td>167.57 ± 3.25</td></tr><tr><td>SWAP (after averaging)</td><td>95.23 ± 0.08</td><td>169.20 ± 3.25</td></tr></table>
150
+
151
+ Table 1: Training Statistics for CIFAR10
+
+ CIFAR100: For these experiments, we use the following settings—SWAP phase one: 2048 samples per batch using 8 GPUs (256 samples per GPU). Phase one exits when the training accuracy reaches $90\%$ (on average 112 epochs). SWAP phase two: 8 workers with one GPU each and 128 samples per batch, training for 10 epochs. The experiments that use only large-batch training were run for 150 epochs with batches of 2048 on 8 GPUs. The experiments that use only small-batches were trained for 150 epochs using batches of 128 on 1 GPU.
154
+
155
+ <table><tr><td>CIFAR100</td><td>Test Accuracy (%)</td><td>Training Time (sec)</td></tr><tr><td>SGD (small-batch)</td><td>77.01 ± 0.25</td><td>573.76 ± 2.25</td></tr><tr><td>SGD (large-batch)</td><td>75.84 ± 0.35</td><td>116.13 ± 1.35</td></tr><tr><td>SWAP (before averaging)</td><td>75.74 ± 0.15</td><td>123.11 ± 1.85</td></tr><tr><td>SWAP (after averaging)</td><td>78.18 ± 0.21</td><td>125.34 ± 1.85</td></tr></table>
156
+
157
+ Table 2: Training Statistics for CIFAR100
158
+
159
+ Table 2 compares the best test accuracies and corresponding training times for models trained with only small-batches (for 150 epochs), with only large-batches (for 150 epochs), and with SWAP.
160
+
161
+ For SWAP, we report test accuracies obtained using the last SGD iterate before averaging, and test accuracy of the final model obtained after averaging. We observe significant improvement in test accuracies after averaging the models.
162
+
163
+ For both CIFAR10 and CIFAR100, training with small-batches achieves higher testing accuracy than training with large-batches but takes much longer to train. SWAP, however, terminates in time comparable to the large-batch run but achieves accuracies on par with (or better than) small-batch training.
164
+
165
+ Achieving state of the art training speeds for CIFAR10: At the time of writing, the frontrunner of the DAWNbench competition takes 37 seconds with 4 Tesla V100 GPUs to train CIFAR10 to $94\%$ test accuracy. Using SWAP with 8 Tesla V100 GPUs (phase one: batches of 2048 samples for 28 epochs; phase two: batches of 256 samples for one epoch), we reach the same accuracy in 27 seconds.
166
+
167
+ # 5.2 EXPERIMENTS ON IMAGENET
168
+
169
+ We use SWAP to accelerate a publicly available fast-to-train ImageNet model with published learning rate and batch size schedules $^{4}$ . The default settings for this code modify the learning-rates and batch sizes throughout the optimization (see Figure 5). Our small-batch experiments train ImageNet for 28 epochs using the published schedules with no modification and are run on 8 Tesla V100 GPUs. Our large-batch experiments modify the schedules by doubling the batch size and doubling the learning rates (see Figure 5) and are run on 16 Tesla V100 GPUs. For SWAP phase 1, we use the large-batch settings for 22 epochs, and for SWAP phase 2, we run two independent workers each with 8 GPUs using the settings for small-batches for 6 epochs.
170
+
171
+ We observe that doubling the batch size reduces the Top1 and Top5 test accuracies with respect to the small-batch run. SWAP, however, recovers the generalization performance at substantially reduced training times. Our results are compiled in Table 3 (the statistics were collected over 3 runs). We believe it is worth mentioning that these accelerations were achieved with no tuning other than increasing the learning rates proportionally to the increase in batch size and reverting to the original schedule when transitioning between phases.
172
+
173
+ ![](images/c0e71149bad3ec35676dbeddbc5324b8f313cdd370cf620017e9b833fcb9da38.jpg)
174
+ (a) Learning rate schedule
175
+
176
+ ![](images/7a2fac7aa56f9cb7dc6d7b27cb100ec1595f6be991301025835f4cc635e9de64.jpg)
177
+ (b) Batch sizes across epochs for ImageNet
178
+ Figure 5: Learning rate and mini-batch schedules used for ImageNet. The original schedule for 8 GPUs was taken from an existing DAWNbench submission. For a larger batch experiment, we double the batch size, double the number of GPUs and double the learning rate of the original schedule. For SWAP, we switch from the modified schedule to the original schedule as we move from phase 1 to phase 2.
179
+
180
+ <table><tr><td>ImageNet</td><td>Top1 Accuracy (%)</td><td>Top5 Accuracy (%)</td><td>Training Time (min)</td></tr><tr><td>SGD (small-batch)</td><td>76.14 ± 0.07</td><td>93.30 ± 0.07</td><td>235.29 ± 0.33</td></tr><tr><td>SGD (large-batch)</td><td>75.86 ± 0.03</td><td>92.98 ± 0.06</td><td>127.20 ± 0.78</td></tr><tr><td>SWAP (before averaging)</td><td>75.96 ± 0.02</td><td>93.15 ± 0.02</td><td>149.12 ± 0.55</td></tr><tr><td>SWAP (after averaging)</td><td>76.19 ± 0.03</td><td>93.32 ± 0.02</td><td>156.55 ± 0.56</td></tr></table>
181
+
182
+ Table 3: Training Statistics for ImageNet
183
+
184
+ ![](images/ca347d7379a433f90c74f2783e34c0d57e9a65f585d4d2e5aeb2dacbd7f00659.jpg)
185
+ (a) Large-batch SWA
186
+
187
+ ![](images/2530afb3eba438d0c55f356ab0628d895d8e35f67e101130f771e794b5d6128c.jpg)
188
+ (b) Large-batch training followed by SWA with small-batches
189
+
190
+ ![](images/b1f3cac654167b7685e54f6604fc8af3cded4b35eaf626ed487579ad4a626b4c.jpg)
191
+ (c) Small-batch SWA
192
+ Figure 6: Illustration of SWA with different batch sizes
193
+
194
+ # 5.3 EMPIRICAL COMPARISON OF SWA AND SWAP
195
+
196
+ We now compare SWAP with SWA: the sequential weight averaging algorithm from Izmailov et al. (2018). For the experiments in this section, we use the CIFAR100 dataset. We sample the same number of models for both SWA and SWAP and maintain the same number of epochs per sample. For SWA, we sample each model with 10 epochs in-between and average them to get the final model. For SWAP, we run 8 independent workers for 10 epochs each and use their average as the final model.
197
+
198
+ Large-batch SWA: We explore whether SWA can recover the test accuracy of small-batch training when applied to a large-batch training run. We use the same (large) batch size throughout. We follow an initial training cycle with cyclic learning rates (with cycles of 10 epochs) to sample 8 models (one from the end of each cycle). See Figure 6a for an illustration of the learning rate schedule.
199
+
200
+ As expected, we observe that the large-batch training run achieves lower training accuracy, but surprisingly SWA was unable to improve it (see Table 4, row 1).
201
+
202
+ Large-batch followed by small-batch SWA: We evaluate the effect of executing SWA using small-batches after a large-batch training run. We interrupt the large-batch phase at the same accuracy we interrupt phase 1 of our CIFAR100 experiment (Table 2). In this case, the small-batch phase uses a single worker and samples the models sequentially. SWA is able to reach the test accuracy of a small-batch run but requires more than three times longer than SWAP to compute the model (see Table 4, row 2). An illustration of the learning rate schedule is provided in Figure 6b.
203
+
204
+ Small-batch SWA and SWAP: We start the SWA cyclic learning rate schedule from the best model found by solely small-batch training (Table 2, row 1). Since the cycle length and cycle count are fixed, the only free parameter is the peak learning rate. We select this using a grid-search. Once the SWA schedule is specified, we re-use the peak learning rate settings in SWAP. We start phase two from the model that was generated as the output of phase 1 for the experiment of Section 5.1 reported in Table 2, rows 3 and 4. With these settings, small-batch SWA achieves better accuracy than SWAP (by $\sim 0.9\%$ ) at 6.8x more training time.
205
+
206
+ Next, we wish to explore the speed-up that SWAP achieves over SWA if the precision of SWA is set as a target. To that end, we relax the constraints on SWAP. By increasing the phase-two schedule from one 10-epoch cycle to two 20-epoch cycles and sampling two models from each worker (16 models), we obtain a model with a test accuracy of $79.11\%$ in 241 seconds, or $3.5\mathrm{x}$ less time.
207
+
208
+ <table><tr><td>CIFAR100</td><td>Test accuracy before averaging (%)</td><td>Test accuracy after averaging (%)</td><td>Training Time (sec)</td></tr><tr><td>Large-batch SWA</td><td>76.06 ± 0.25</td><td>76.00 ± 0.31</td><td>376.4 ± 2.25</td></tr><tr><td>Large-batch followed by small-batch SWA</td><td>76.26 ± 0.35</td><td>78.12 ± 0.14</td><td>398.0 ± 1.35</td></tr><tr><td>Small-batch SWA</td><td>76.80 ± 0.15</td><td>79.09 ± 0.19</td><td>848.6 ± 5.61</td></tr><tr><td>SWAP (10 small-batch epochs)</td><td>75.74 ± 0.15</td><td>78.18 ± 0.21</td><td>125.30 ± 1.85</td></tr><tr><td>SWAP (40 small-batch epochs)</td><td>76.19 ± 0.19</td><td>79.11 ± 0.12</td><td>241.54 ± 1.62</td></tr></table>
209
+
210
+ Table 4: Comparison: SWA versus SWAP
211
+
212
+ # 6 CONCLUSIONS AND FUTURE WORK
213
+
214
+ We propose Stochastic Weight Averaging in Parallel (SWAP), an algorithm that uses a variant of Stochastic Weight Averaging (SWA) to improve the generalization performance of a model trained with large mini-batches. Our algorithm uses large mini-batches to compute an approximate solution quickly and then refines it by averaging the weights of multiple models trained using small-batches. The final model obtained after averaging has good generalization performance and is trained in a shorter time. We believe that this variant and this application of SWA are novel.
215
+
216
+ We observed that using large-batches in the initial stages of training does not preclude the models from achieving good generalization performance. That is, by refining the output of a large-batch run, with models sampled sequentially as in SWA or in parallel as in SWAP, the resulting model is able to perform as well as the models trained using small-batches only. We confirm this in the image classification datasets CIFAR10, CIFAR100, and ImageNet.
217
+
218
+ Through visualizations, we complement the existing evidence that averaged weights are closer to the center of a training loss basin than the models produced by stochastic gradient descent. It is interesting to note that the basin into which the large mini-batch run is converging seems to be the same basin where the refined models are found. So, it is possible that regions with bad and good generalization performance are connected through regions of low training loss and, more so, that both belong to an almost convex basin. Our method requires the choice of (at least) one more hyperparameter: the transition point between the large-batch and small-batch phases. For our experiments, we chose this using a grid search. A principled method to choose the transition point will be the focus of future work.
219
+
220
+ In future work we intend to explore the behavior of SWAP when used with other optimization schemes, such as Layer-wise Adaptive Rate Scaling (LARS) (You et al., 2017), mixed-precision training (Jia et al., 2018), post-local SGD (Lin et al., 2018), or NovoGrad (Ginsburg et al., 2019). The design of SWAP allows us to substitute any of these for the large-batch stage; for example, we can use local SGD to accelerate the first stage of SWAP by reducing the communication overhead.
221
+
222
+ # REFERENCES
223
+
224
+ Ben Athiwaratkun, Marc Finzi, Pavel Izmailov, and Andrew Gordon Wilson. There are many consistent explanations of unlabeled data: Why you should average. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=rkgKBhA5Y7.
225
+ Cody Coleman, Deepak Narayanan, Daniel Kang, Tian Zhao, Jian Zhang, Luigi Nardi, Peter Bailis, Kunle Olukotun, Chris Re, and Matei Zaharia. Dawnbench: An end-to-end deep learning benchmark and competition.
226
+ Aditya Devarakonda, Maxim Naumov, and Michael Garland. Adabatch: Adaptive batch sizes for training deep neural networks. CoRR, abs/1712.02029, 2017. URL http://arxiv.org/abs/1712.02029.
227
+ Terrance DeVries and Graham W Taylor. Improved regularization of convolutional neural networks with cutout. arXiv preprint arXiv:1708.04552, 2017.
228
+ Laurent Dinh, Razvan Pascanu, Samy Bengio, and Yoshua Bengio. Sharp minima can generalize for deep nets. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 1019-1028. JMLR.org, 2017.
229
+ Timur Garipov, Pavel Izmailov, Dmitrii Podoprikhin, Dmitry P Vetrov, and Andrew G Wilson. Loss surfaces, mode connectivity, and fast ensembling of dnns. In Advances in Neural Information Processing Systems, pp. 8789-8798, 2018.
230
+ Boris Ginsburg, Patrice Castonguay, Oleksii Hrinchuk, Oleksii Kuchaiev, Vitaly Lavrukhin, Ryan Leary, Jason Li, Huyen Nguyen, and Jonathan M Cohen. Stochastic gradient methods with layerwise adaptive moments for training of deep networks. arXiv preprint arXiv:1905.11286, 2019.
231
+ Noah Golmant, Nikita Vemuri, Zhewei Yao, Vladimir Feinberg, Amir Gholami, Kai Rothauge, Michael W Mahoney, and Joseph Gonzalez. On the computational inefficiency of large batch sizes for stochastic gradient descent. arXiv preprint arXiv:1811.12941, 2018.
232
+ Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017.
233
+ Elad Hoffer, Itay Hubara, and Daniel Soudry. Train longer, generalize better: closing the generalization gap in large batch training of neural networks. In NIPS, 2017.
234
+ Pavel Izmailov, Dmitrii Podoprikhin, Timur Garipov, Dmitry Vetrov, and Andrew Gordon Wilson. Averaging weights leads to wider optima and better generalization. arXiv preprint arXiv:1803.05407, 2018.
235
+ Xianyan Jia, Shutao Song, Wei He, Yangzihao Wang, Haidong Rong, Feihu Zhou, Liqiang Xie, Zhenyu Guo, Yuzhou Yang, Liwei Yu, et al. Highly scalable deep learning training system with mixed-precision: Training ImageNet in four minutes. arXiv preprint arXiv:1807.11205, 2018.
236
+ Nitish Shirish Keskar, Dheevatsa Mudigere, Jorge Nocedal, Mikhail Smelyanskiy, and Ping Tak Peter Tang. On large-batch training for deep learning: Generalization gap and sharp minima. arXiv preprint arXiv:1609.04836, 2016.
237
+ Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. In Advances in Neural Information Processing Systems, pp. 6389-6399, 2018.
238
+ Tian Li, Anit Kumar Sahu, Ameet Talwalkar, and Virginia Smith. Federated learning: Challenges, methods, and future directions. arXiv preprint arXiv:1908.07873, 2019.
239
+ Tao Lin, Sebastian U Stich, Kumar Kshitij Patel, and Martin Jaggi. Don't use large mini-batches, use local SGD. arXiv preprint arXiv:1808.07217, 2018.
240
+
241
+ Siyuan Ma, Raef Bassily, and Mikhail Belkin. The power of interpolation: Understanding the effectiveness of SGD in modern over-parametrized learning. arXiv preprint arXiv:1712.06559, 2017.
242
+ Wesley Maddox, Timur Garipov, Pavel Izmailov, Dmitry Vetrov, and Andrew Gordon Wilson. A simple baseline for Bayesian uncertainty in deep learning. arXiv preprint arXiv:1902.02476, 2019.
243
+ Stephan Mandt, Matthew D Hoffman, and David M Blei. Stochastic gradient descent as approximate Bayesian inference. The Journal of Machine Learning Research, 18(1):4873-4907, 2017.
244
+ Sam McCandlish, Jared Kaplan, Dario Amodei, and OpenAI Dota Team. An empirical model of large-batch training. arXiv preprint arXiv:1812.06162, 2018.
245
+ Evgenii Nikishin, Pavel Izmailov, Ben Athiwaratkun, Dmitrii Podoprikhin, Timur Garipov, Pavel Shvechikov, Dmitry Vetrov, and Andrew Gordon Wilson. Improving stability in deep reinforcement learning with weight averaging.
246
+ Alexander Sergeev and Mike Del Balso. Horovod: fast and easy distributed deep learning in tensorflow. arXiv preprint arXiv:1802.05799, 2018.
247
+ Christopher J Shallue, Jaehoon Lee, Joe Antognini, Jascha Sohl-Dickstein, Roy Frostig, and George E Dahl. Measuring the effects of data parallelism on neural network training. arXiv preprint arXiv:1811.03600, 2018.
248
+ Samuel L Smith, Pieter-Jan Kindermans, Chris Ying, and Quoc V Le. Don't decay the learning rate, increase the batch size. arXiv preprint arXiv:1711.00489, 2017.
249
+ Sebastian U Stich. Local SGD converges fast and communicates little. arXiv preprint arXiv:1805.09767, 2018.
250
+ Guandao Yang, Tianyi Zhang, Polina Kirichenko, Junwen Bai, Andrew Gordon Wilson, and Chris De Sa. SWALP: Stochastic weight averaging in low precision training. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pp. 7015-7024, Long Beach, California, USA, 09-15 Jun 2019. PMLR.
251
+ Yang You, Igor Gitman, and Boris Ginsburg. Scaling SGD batch size to 32K for ImageNet training, 2017.
252
+ Hao Yu, Sen Yang, and Shenghuo Zhu. Parallel restarted SGD with faster convergence and less communication: Demystifying why model averaging works for deep learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 5693-5700, 2019.
253
+ Jian Zhang, Christopher De Sa, Ioannis Mitliagkas, and Christopher Ré. Parallel SGD: When does averaging help? arXiv preprint arXiv:1606.07365, 2016.
254
+
255
+ # A HYPERPARAMETERS FOR CIFAR10 AND CIFAR100 EXPERIMENTS
256
+
257
+ We provide the hyperparameters used in the experiments of Section 5.1, obtained from independent grid searches for each experiment. For all CIFAR experiments, the momentum and weight decay constants were kept at 0.9 and $5 \times 10^{-4}$ respectively. Tables 5 and 6 list the remaining hyperparameters. When a stopping accuracy of $100\%$ is listed, we mean that training ran for the maximum number of epochs.
258
+
259
+ <table><tr><td>CIFAR10</td><td>SGD (small-batch)</td><td>SGD (large-batch)</td><td>SWAP (Phase 1)</td><td>SWAP (Phase 2)</td></tr><tr><td>Batch-size</td><td>512</td><td>4096</td><td>4096</td><td>512</td></tr><tr><td>Learning-rate Peak</td><td>0.3</td><td>1.2</td><td>1.2</td><td>0.12</td></tr><tr><td>Maximum Epochs</td><td>100</td><td>150</td><td>150</td><td>30</td></tr><tr><td>Warm-up Epochs</td><td>30</td><td>30</td><td>30</td><td>0</td></tr><tr><td>GPUs used per model</td><td>2</td><td>8</td><td>8</td><td>1</td></tr><tr><td>Stopping Accuracy (%)</td><td>100</td><td>100</td><td>98</td><td>100</td></tr></table>
260
+
261
+ Table 5: Hyperparameters obtained using tuning for CIFAR10
262
+
263
+ <table><tr><td>CIFAR100</td><td>SGD (small-batch)</td><td>SGD (large-batch)</td><td>SWAP (Phase 1)</td><td>SWAP (Phase 2)</td></tr><tr><td>Batch-size</td><td>128</td><td>2048</td><td>2048</td><td>128</td></tr><tr><td>Learning-rate Peak</td><td>0.2</td><td>1.2</td><td>1.2</td><td>0.05</td></tr><tr><td>Total Epochs</td><td>150</td><td>150</td><td>150</td><td>30</td></tr><tr><td>Warm-up Epochs</td><td>60</td><td>45</td><td>45</td><td>0</td></tr><tr><td>GPUs used per model</td><td>1</td><td>8</td><td>8</td><td>1</td></tr><tr><td>Stopping Accuracy (%)</td><td>100</td><td>100</td><td>90</td><td>100</td></tr></table>
264
+
265
+ Table 6: Hyperparameters obtained using tuning for CIFAR100
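+
+ To make the two-phase schedule concrete, the listing below sketches how the CIFAR10 settings in Table 5 map onto the two phases of SWAP. This is a minimal single-process sketch under simplifying assumptions (a constant learning rate after warm-up, an assumed `model` and pair of data loaders), not our released training code; in practice each Phase 2 replica runs on its own worker with a distinct sampling seed.
+
+ ```python
+ import copy
+ import torch
+
+ def train(model, loader, opt, max_epochs, warmup_epochs, peak_lr,
+           stop_train_acc=1.0):
+     # SGD training with a linear warm-up to peak_lr (schedule simplified here).
+     loss_fn = torch.nn.CrossEntropyLoss()
+     for epoch in range(max_epochs):
+         for group in opt.param_groups:
+             group["lr"] = peak_lr * min(1.0, (epoch + 1) / max(1, warmup_epochs))
+         correct = total = 0
+         for x, y in loader:
+             opt.zero_grad()
+             out = model(x)
+             loss_fn(out, y).backward()
+             opt.step()
+             correct += (out.argmax(dim=1) == y).sum().item()
+             total += y.numel()
+         if correct / total >= stop_train_acc:  # early stop, e.g. 98% in Phase 1
+             break
+
+ def swap_cifar10(model, loader_4096, loader_512, workers=8):
+     def sgd(m, lr):
+         return torch.optim.SGD(m.parameters(), lr=lr,
+                                momentum=0.9, weight_decay=5e-4)
+     # Phase 1: large-batch SGD, batch 4096, peak LR 1.2, 30 warm-up epochs,
+     # stopped once training accuracy reaches 98% (Table 5).
+     train(model, loader_4096, sgd(model, 1.2),
+           max_epochs=150, warmup_epochs=30, peak_lr=1.2, stop_train_acc=0.98)
+     # Phase 2: independent small-batch refinements, batch 512, peak LR 0.12.
+     replicas = [copy.deepcopy(model) for _ in range(workers)]
+     for replica in replicas:
+         train(replica, loader_512, sgd(replica, 0.12),
+               max_epochs=30, warmup_epochs=0, peak_lr=0.12)
+     # Average the refined weights to obtain the final model.
+     avg = copy.deepcopy(replicas[0].state_dict())
+     for key in avg:
+         avg[key] = torch.stack([r.state_dict()[key].float()
+                                 for r in replicas]).mean(dim=0)
+     model.load_state_dict(avg)
+     return model
+ ```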
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d177a627981febec4beebd8296fe6ce905fbdb0019c9eadb3456e2246e5a00ea
3
+ size 377379
stochasticweightaveraginginparallellargebatchtrainingthatgeneralizeswell/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbe439ad7e4944a21ba2550ac4930fb563931a5ad3de3109f81a2fb4b9e80484
3
+ size 320597
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf69cdf8032fcb56da2155d4dcf14ddae3f8e1cc65178bdf07bad2a6cd53cf69
3
+ size 63438
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25899a2c787e1f3dce81e0293808e7b89281cddb34c1aa2d4135bfcfdf900ab4
3
+ size 76329
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/77c07111-eec3-4d5b-8ceb-4b81d0b63175_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:002832a3e62dc307e0c4c589e1bb05032d5a2e3d65fa69eb49512ed7caabee56
3
+ size 414419
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/full.md ADDED
@@ -0,0 +1,226 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # STRUCTBERT: INCORPORATING LANGUAGE STRUCTURES INTO PRE-TRAINING FOR DEEP LANGUAGE UNDERSTANDING
2
+
3
+ Wei Wang, Bin Bi, Ming Yan, Chen Wu, Jiangnan Xia, Zuyi Bao, Liwei Peng and Luo Si Alibaba Group Inc.
4
+
5
+ {hebian.ww, b.bi, yml19608, wuchen.wc, jiangnan.xjn, zuyi.bzy, liwei.peng, luo.si}@alibaba-inc.com
6
+
7
+ # ABSTRACT
8
+
9
+ Recently, the pre-trained language model, BERT (and its robustly optimized version RoBERTa), has attracted a lot of attention in natural language understanding (NLU), and achieved state-of-the-art accuracy in various NLU tasks, such as sentiment classification, natural language inference, semantic textual similarity and question answering. Inspired by the linearization exploration work of Elman (Elman, 1990), we extend BERT to a new model, StructBERT, by incorporating language structures into pre-training. Specifically, we pre-train StructBERT with two auxiliary tasks to make the most of the sequential order of words and sentences, which leverage language structures at the word and sentence levels, respectively. As a result, the new model is adapted to different levels of language understanding required by downstream tasks.
10
+
11
+ The StructBERT with structural pre-training gives surprisingly good empirical results on a variety of downstream tasks, including pushing the state-of-the-art on the GLUE benchmark to 89.0 (outperforming all published models at the time of model submission), the F1 score on SQuAD v1.1 question answering to 93.0, and the accuracy on SNLI to 91.7.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ A pre-trained language model (LM) is a key component in many natural language understanding (NLU) tasks such as semantic textual similarity (Cer et al., 2017), question answering (Rajpurkar et al., 2016) and sentiment classification (Socher et al., 2013). In order to obtain reliable language representations, neural language models are designed to define the joint probability function of sequences of words in text with self-supervised learning. Different from traditional word-specific embedding in which each token is assigned a global representation, recent work, such as Cove (McCann et al., 2017), ELMo (Peters et al., 2018), GPT (Radford et al., 2018) and BERT (Devlin et al., 2018), derives contextualized word vectors from a language model trained on a large text corpus. These models have been shown effective for many downstream NLU tasks.
16
+
17
+ Among the context-sensitive language models, BERT (and its robustly optimized version RoBERTa (Liu et al., 2019b)) has taken the NLP world by storm. It is designed to pre-train bidirectional representations by jointly conditioning on both left and right context in all layers and model the representations by predicting masked words only through the contexts. However, it does not make the most of underlying language structures.
18
+
19
+ According to Elman's study (Elman, 1990), recurrent neural networks were shown to be sensitive to regularities in word order in simple sentences. Since language fluency is determined by the ordering of words and sentences, finding the best permutation of a set of words and sentences is an essential problem in many NLP tasks, such as machine translation and NLU (Hasler et al., 2017). Recently, word ordering was treated as linearization based solely on language models (Schmaltz et al., 2016). Schmaltz showed that recurrent neural network language models (Mikolov et al., 2010) with long short-term memory (Hochreiter & Schmidhuber, 1997) cells work effectively for word ordering even without any explicit syntactic information.
20
+
21
+ In this paper, we introduce a new type of contextual representation, StructBERT, which incorporates language structures into BERT pre-training by proposing two novel linearization strategies. Specifically, in addition to the existing masking strategy, StructBERT extends BERT by leveraging the structural information: word-level ordering and sentence-level ordering. We augment model pre-training with two new structural objectives on the inner-sentence and inter-sentence structures, respectively. In this way, the linguistic aspects (Elman, 1990) are explicitly captured during the pre-training procedure. With structural pre-training, StructBERT encodes dependency between words as well as sentences in the contextualized representation, which provides the model with better generalizability and adaptability.
22
+
23
+ StructBERT significantly advances the state-of-the-art results on a variety of NLU tasks, including the GLUE benchmark (Wang et al., 2018), the SNLI dataset (Bowman et al., 2015) and the SQuAD v1.1 question answering task (Rajpurkar et al., 2016). All of these experimental results clearly demonstrate StructBERT's exceptional effectiveness and generalization capability in language understanding.
24
+
25
+ We make the following major contributions:
26
+
27
+ - We propose novel structural pre-training that extends BERT by incorporating the word structural objective and the sentence structural objective to leverage language structures in contextualized representation. This enables the StructBERT to explicitly model language structures by forcing it to reconstruct the right order of words and sentences for correct prediction.
28
+ - StructBERT significantly outperforms all published state-of-the-art models on a wide range of NLU tasks at the time of model submission. This model extends the superiority of BERT, and boosts the performance in many language understanding applications such as semantic textual similarity, sentiment analysis, textual entailment, and question answering.
29
+
30
+ # 2 STRUCTBERT MODEL PRE-TRAINING
31
+
32
+ StructBERT builds upon the BERT architecture, which uses a multi-layer bidirectional Transformer network (Vaswani et al., 2017). Given a single text sentence or a pair of text sentences, BERT packs them in one token sequence and learns a contextualized vector representation for each token. Every input token is represented based on the word, the position, and the text segment it belongs to. Next, the input vectors are fed into a stack of multi-layer bidirectional Transformer blocks, which uses self-attention to compute the text representations by considering the entire input sequence.
33
+
34
+ The original BERT introduces two unsupervised prediction tasks to pre-train the model: a masked LM task and a next sentence prediction task. Different from the original BERT, our StructBERT amplifies the masked LM task by shuffling a certain number of tokens after word masking and predicting the right order. Moreover, to better understand the relationship between sentences, StructBERT randomly swaps the sentence order and predicts both the next sentence and the previous sentence as a new sentence prediction task. In this way, the new model not only explicitly captures the fine-grained word structure in every sentence, but also properly models the inter-sentence structure in a bidirectional manner. Once the StructBERT language model is pre-trained with these two auxiliary tasks, we can fine-tune it on task-specific data for a wide range of downstream tasks.
35
+
36
+ # 2.1 INPUT REPRESENTATION
37
+
38
+ Every input $x$ is a sequence of word tokens, which can be either a single sentence or a pair of sentences packed together. The input representation follows that used in BERT (Devlin et al., 2018). For each input token $t_i$ , its vector representation $\mathbf{x}_i$ is computed by summing the corresponding token embedding, positional embedding, and segment embedding. We always add a special classification embedding ([CLS]) as the first token of every sequence, and a special end-of-sequence ([SEP]) token to the end of each segment. Texts are tokenized to subword units by WordPiece (Wu et al., 2016) and absolute positional embeddings are learned with supported sequence lengths up to 512 tokens. In addition, the segment embeddings are used to differentiate a pair of sentences as in BERT.
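+
+ As a concrete illustration, a minimal PyTorch sketch of this input representation is given below; the vocabulary size, hidden size, and example token ids are illustrative assumptions rather than values from our implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class InputRepresentation(nn.Module):
+     def __init__(self, vocab_size=30522, hidden=768, max_len=512, segments=2):
+         super().__init__()
+         self.tok = nn.Embedding(vocab_size, hidden)   # WordPiece token ids
+         self.pos = nn.Embedding(max_len, hidden)      # learned absolute positions
+         self.seg = nn.Embedding(segments, hidden)     # sentence A vs. sentence B
+
+     def forward(self, token_ids, segment_ids):
+         # token_ids, segment_ids: (batch, seq_len) integer tensors.
+         positions = torch.arange(token_ids.size(1), device=token_ids.device)
+         return (self.tok(token_ids)
+                 + self.pos(positions)[None, :, :]
+                 + self.seg(segment_ids))
+
+ # Example: [CLS] w1 w2 [SEP] w3 [SEP], with two segment ids.
+ ids = torch.tensor([[101, 7592, 2088, 102, 2293, 102]])
+ segs = torch.tensor([[0, 0, 0, 0, 1, 1]])
+ x = InputRepresentation()(ids, segs)   # shape: (1, 6, 768)
+ ```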
39
+
40
+ ![](images/9d83fed7df606054e3c60ea884ca3db5c2dac2f9ac95a0024dbe2e356bdfea69.jpg)
41
+ (a) Word Structural Objective
42
+
43
+ ![](images/a9dee39eb89c6c068eb6a1781c7956da8f5a8e54a8f77b43010f956be6391c74.jpg)
44
+ (b) Sentence Structural Objective
45
+ Figure 1: Illustrations of the two new pre-training objectives
46
+
47
+ # 2.2 TRANSFORMER ENCODER
48
+
49
+ We use a multi-layer bidirectional Transformer encoder (Vaswani et al., 2017) to encode contextual information for input representation. Given the input vectors $\mathbf{X} = \{\mathbf{x}_i\}_{i=1}^N$ , an $L$ -layer Transformer is used to encode the input as:
50
+
51
+ $$
52
+ \mathbf{H}^{l} = \mathrm{Transformer}_{l}\left(\mathbf{H}^{l-1}\right) \tag{1}
53
+ $$
54
+
55
+ where $l \in [1, L]$ , $\mathbf{H}^0 = \mathbf{X}$ and $\mathbf{H}^L = [\mathbf{h}_1^L, \dots, \mathbf{h}_N^L]$ . We use the hidden vector $\mathbf{h}_i^L$ as the contextualized representation of the input token $t_i$ .
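+
+ In code, Equation (1) amounts to stacking $L$ encoder layers and threading the hidden states through them. A minimal sketch, using PyTorch's built-in encoder layer as a stand-in for the exact Transformer block (sizes are illustrative):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ L, H, A = 12, 768, 12   # layers, hidden size, attention heads (base size)
+ layers = nn.ModuleList(
+     nn.TransformerEncoderLayer(d_model=H, nhead=A, batch_first=True)
+     for _ in range(L))   # note: dim_feedforward defaults to 2048 here,
+                          # whereas BERT-style blocks use 3072
+
+ X = torch.randn(2, 16, H)   # H^0 = X: (batch, seq_len, hidden)
+ h = X
+ for layer in layers:        # H^l = Transformer_l(H^{l-1})
+     h = layer(h)
+ # h[:, i, :] is the contextualized representation h_i^L of token t_i.
+ ```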
56
+
57
+ # 2.3 PRE-TRAINING OBJECTIVES
58
+
59
+ To make full use of the rich inner-sentence and inter-sentence structures in language, we extend the pre-training objectives of original BERT in two ways: ① word structural objective (mainly for the single-sentence task), and ② sentence structural objective (mainly for the sentence-pair task). We pre-train these two auxiliary objectives together with the original masked LM objective in a unified model to exploit inherent language structures.
60
+
61
+ # 2.3.1 WORD STRUCTURAL OBJECTIVE
62
+
63
+ Despite its success in various NLU tasks, the original BERT is unable to explicitly model the sequential order and high-order dependency of words in natural language. Given a set of words in random order from a sentence, ideally a good language model should be able to recover this sentence by reconstructing the correct order of these words. To implement this idea in StructBERT, we supplement BERT's training objectives with a new word structural objective that endows the model with the ability to reconstruct the right order of a certain number of intentionally shuffled word tokens. This new word objective is jointly trained together with the original masked LM objective from BERT.
64
+
65
+ Figure 1a illustrates the procedure of jointly training the new word objective and the masked LM objective. In every input sequence, we first mask $15\%$ of all tokens at random, as done in BERT (Devlin et al., 2018). The corresponding output vectors $\mathbf{h}_i^L$ of the masked tokens computed by the bidirectional Transformer encoder are fed into a softmax classifier to predict the original tokens.
66
+
67
+ Next, the new word objective comes into play to take word order into consideration. Given the randomness of token shuffling, the word objective is equivalent to maximizing the likelihood of placing every shuffled token in its correct position. More formally, this objective can be formulated as:
68
+
69
+ $$
70
+ \arg\max_{\theta} \sum \log P\left(\mathrm{pos}_1 = t_1, \mathrm{pos}_2 = t_2, \dots, \mathrm{pos}_K = t_K \mid t_1, t_2, \dots, t_K, \theta\right), \tag{2}
71
+ $$
72
+
73
+ where $\theta$ represents the set of trainable parameters of StructBERT, and $K$ is the length of each shuffled subsequence. Technically, a larger $K$ forces the model to reconstruct longer sequences while injecting more disturbed input. Conversely, a smaller $K$ gives the model more undisturbed sequences but makes it less capable of recovering long sequences. We use trigrams (i.e., $K = 3$) for subsequence shuffling to balance the language reconstructability and robustness of the model.
74
+
75
+ Specifically, as shown in Figure 1a, we randomly choose some percentage of trigrams from unmasked tokens, and shuffle the three words (e.g., $t_2$ , $t_3$ , and $t_4$ in the figure) within each of the trigrams. The output vectors of the shuffled tokens computed by the bidirectional Transformer encoder are then fed into a softmax classifier to predict the original tokens. The new word objective is jointly learned together with the masked LM objective in a unified pre-trained model with equal weights.
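+
+ A minimal sketch of the trigram shuffling step is given below. The token/mask representation is an illustrative assumption (in practice this operates on WordPiece ids), and we treat shuffled trigrams as non-overlapping for simplicity:
+
+ ```python
+ import random
+
+ def shuffle_trigrams(tokens, masked_positions, rate=0.05, k=3, rng=random):
+     # Returns the disturbed sequence plus {position: original token} labels
+     # used as prediction targets for the word structural objective.
+     tokens = list(tokens)
+     labels = {}
+     i = 0
+     while i + k <= len(tokens):
+         span = range(i, i + k)
+         if all(j not in masked_positions for j in span) and rng.random() < rate:
+             original = [tokens[j] for j in span]
+             permuted = original[:]
+             rng.shuffle(permuted)
+             for j, orig, new in zip(span, original, permuted):
+                 labels[j] = orig     # the model must recover this token here
+                 tokens[j] = new
+             i += k                   # keep shuffled spans non-overlapping
+         else:
+             i += 1
+     return tokens, labels
+
+ toks, labels = shuffle_trigrams(["t1", "t2", "t3", "t4", "t5"],
+                                 masked_positions=set(), rate=1.0)
+ ```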
76
+
77
+ # 2.3.2 SENTENCE STRUCTURAL OBJECTIVE
78
+
79
+ The next sentence prediction task is considered easy for the original BERT model (the prediction accuracy of BERT can easily achieve $97\% - 98\%$ in this task (Devlin et al., 2018)). We, therefore, extend the sentence prediction task by predicting both the next sentence and the previous sentence, to make the pre-trained language model aware of the sequential order of the sentences in a bidirectional manner.
80
+
81
+ As illustrated in Figure 1b, given a pair of sentences $(S_{1}, S_{2})$ as input, we predict whether $S_{2}$ is the next sentence that follows $S_{1}$ , or the previous sentence that precedes $S_{1}$ , or a random sentence from a different document. Specifically, for the sentence $S_{1}$ , $\frac{1}{3}$ of the time we choose the text span that follows $S_{1}$ as the second sentence $S_{2}$ , $\frac{1}{3}$ of the time the previous sentence ahead of $S_{1}$ is selected, and $\frac{1}{3}$ of the time a sentence randomly sampled from the other documents is used as $S_{2}$ . The two sentences are concatenated together into an input sequence with the separator token [SEP] in between, as done in BERT. We pool the model output by taking the hidden state corresponding to the first token [CLS], and feed the encoding vector of [CLS] into a softmax classifier to make a three-class prediction.
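+
+ A minimal sketch of this three-way sampling follows; the `docs` layout (a list of documents, each given as a list of sentences) is an illustrative assumption:
+
+ ```python
+ import random
+
+ NEXT, PREV, RAND = 0, 1, 2   # three-class label predicted from [CLS]
+
+ def sample_pair(docs, rng=random):
+     # Pick an anchor sentence that has both a predecessor and a successor.
+     d = rng.choice([i for i in range(len(docs)) if len(docs[i]) >= 3])
+     i = rng.randrange(1, len(docs[d]) - 1)
+     s1 = docs[d][i]
+     label = rng.randrange(3)          # each case with probability 1/3
+     if label == NEXT:
+         s2 = docs[d][i + 1]
+     elif label == PREV:
+         s2 = docs[d][i - 1]
+     else:                             # random sentence from another document
+         other = rng.choice([j for j in range(len(docs)) if j != d])
+         s2 = rng.choice(docs[other])
+     return s1, s2, label              # encoded as [CLS] s1 [SEP] s2 [SEP]
+ ```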
82
+
83
+ # 2.4 PRE-TRAINING SETUP
84
+
85
+ The training objective function is a linear combination of the word structural objective and the sentence structural objective. For the masked LM objective, we followed the same masking rate and settings as in BERT (Devlin et al., 2018). $5\%$ of trigrams are selected for random shuffling.
86
+
87
+ We used documents from English Wikipedia (2,500M words) and BookCorpus (Zhu et al., 2015) as pre-training data, following the preprocessing and the WordPiece tokenization from (Devlin et al., 2018). The maximum length of input sequence was set to 512.
88
+
89
+ We ran Adam with learning rate of 1e-4, $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ , L2 weight decay of 0.01, learning rate warm-up over the first $10\%$ of the total steps, and linear decay of the learning rate. We set a dropout probability of 0.1 for every layer. The gelu activation (Hendrycks & Gimpel, 2016) was used as done in GPT (Radford et al., 2018).
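+
+ The warm-up and decay schedule can be written compactly with a standard PyTorch scheduler. The sketch below assumes `model` and the total step count are given, and is generic scheduler code rather than our exact training script:
+
+ ```python
+ import torch
+
+ def make_optimizer(model, total_steps, peak_lr=1e-4, warmup_frac=0.1):
+     opt = torch.optim.Adam(model.parameters(), lr=peak_lr,
+                            betas=(0.9, 0.999), weight_decay=0.01)
+     warmup = int(total_steps * warmup_frac)
+
+     def lr_lambda(step):
+         if step < warmup:                       # linear warm-up
+             return step / max(1, warmup)
+         # linear decay to zero over the remaining steps
+         return max(0.0, (total_steps - step) / max(1, total_steps - warmup))
+
+     sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)
+     return opt, sched   # call sched.step() once per optimization step
+ ```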
90
+
91
+ We denote the number of Transformer block layers as $L$ , the size of hidden vectors as $H$ , and the number of self-attention heads as $A$ . Following the practice of BERT, we primarily report experimental results on the two model sizes:
92
+
93
+ StructBERTBase: $L = 12$ , $H = 768$ , $A = 12$ , Number of parameters = 110M
94
+
95
+ StructBERTLarge: $L = 24$ , $H = 1024$ , $A = 16$ , Number of parameters = 340M
96
+
97
+ Pre-training of StructBERT was performed on a distributed computing cluster consisting of 64 Tesla V100 GPU cards. For StructBERTBase, we ran the pre-training procedure for 40 epochs, which took about 38 hours; the training of StructBERTLarge took about 7 days to complete.
98
+
99
+ # 3 EXPERIMENTS
100
+
101
+ In this section, we report results of StructBERT on a variety of downstream tasks including the General Language Understanding Evaluation (GLUE benchmark), the Stanford Natural Language Inference (SNLI corpus) and extractive question answering (SQuAD v1.1).
102
+
103
+ Following BERT's practice, during fine-tuning on downstream tasks, we performed a grid search or an exhaustive search (depending on the data size) on the following sets of parameters and chose the model that performed the best on the dev set. All the other parameters remain the same as those in pre-training:
104
+
105
+ Batch size: 16, 24, 32; Learning rate: 2e-5, 3e-5, 5e-5; Number of epochs: 2, 3; Dropout rate: 0.05, 0.1
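+
+ The search itself is an exhaustive loop over these sets. A sketch, where `train_and_eval` is an assumed helper that fine-tunes with one configuration and returns its dev-set score:
+
+ ```python
+ from itertools import product
+
+ grid = {
+     "batch_size": [16, 24, 32],
+     "lr": [2e-5, 3e-5, 5e-5],
+     "epochs": [2, 3],
+     "dropout": [0.05, 0.1],
+ }
+
+ def best_config(train_and_eval):
+     best_score, best_cfg = float("-inf"), None
+     for values in product(*grid.values()):
+         cfg = dict(zip(grid.keys(), values))
+         score = train_and_eval(**cfg)   # fine-tune, then score on the dev set
+         if score > best_score:
+             best_score, best_cfg = score, cfg
+     return best_cfg, best_score
+ ```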
106
+
107
+ <table><tr><td>System</td><td>CoLA 8.5k</td><td>SST-2 67k</td><td>MRPC 3.5k</td><td>STS-B 5.7k</td><td>QQP 363k</td><td>MNLI 392k</td><td>QNLI 108k</td><td>RTE 2.5k</td><td>WNLI 634</td><td>AX</td><td>Avg.</td></tr><tr><td>Human Baseline</td><td>66.4</td><td>97.8</td><td>86.3/80.8</td><td>92.7/92.6</td><td>59.5/80.4</td><td>92.0/92.8</td><td>91.2</td><td>93.6</td><td>95.9</td><td>-</td><td></td></tr><tr><td>BERTLarge1</td><td>60.5</td><td>94.9</td><td>89.3/85.4</td><td>87.6/86.5</td><td>72.1/89.3</td><td>86.7/85.9</td><td>92.7</td><td>70.1</td><td>65.1</td><td>39.6</td><td>80.5</td></tr><tr><td>BERT on STILTs2</td><td>62.1</td><td>94.3</td><td>90.2/86.6</td><td>88.7/88.3</td><td>71.9/89.4</td><td>86.4/85.6</td><td>92.7</td><td>80.1</td><td>65.1</td><td>28.3</td><td>82.0</td></tr><tr><td>SpanBERT3</td><td>64.3</td><td>94.8</td><td>90.9/87.9</td><td>89.9/89.1</td><td>71.9/89.5</td><td>88.1/87.7</td><td>94.3</td><td>79.0</td><td>65.1</td><td>45.1</td><td>82.8</td></tr><tr><td>Snorkel MeTaL4</td><td>63.8</td><td>96.2</td><td>91.5/88.5</td><td>90.1/89.7</td><td>73.1/89.9</td><td>87.6/87.2</td><td>93.9</td><td>80.9</td><td>65.1</td><td>39.9</td><td>83.2</td></tr><tr><td>MT-DNN++5</td><td>65.4</td><td>95.6</td><td>91.1/88.2</td><td>89.6/89.0</td><td>72.7/89.6</td><td>87.9/87.4</td><td>95.8</td><td>85.1</td><td>65.1</td><td>41.9</td><td>83.8</td></tr><tr><td>MT-DNN*5</td><td>65.4</td><td>96.5</td><td>92.2/89.5</td><td>89.6/89.0</td><td>73.7/89.9</td><td>87.9/87.4</td><td>96.0</td><td>85.7</td><td>65.1</td><td>42.8</td><td>84.2</td></tr><tr><td>StructBERTBase</td><td>57.2</td><td>94.7</td><td>89.9/86.1</td><td>88.5/87.6</td><td>72.0/89.6</td><td>85.5/84.6</td><td>92.6</td><td>76.9</td><td>65.1</td><td>39.0</td><td>80.9</td></tr><tr><td>StructBERTLarge</td><td>65.3</td><td>95.2</td><td>92.0/89.3</td><td>90.3/89.4</td><td>74.1/90.5</td><td>88.0/87.7</td><td>95.7</td><td>83.1</td><td>65.1</td><td>43.6</td><td>83.9</td></tr><tr><td>StructBERTLarge*</td><td>68.6</td><td>95.2</td><td>92.5/90.1</td><td>91.1/90.6</td><td>74.4/90.7</td><td>88.2/87.9</td><td>95.7</td><td>83.1</td><td>65.1</td><td>43.9</td><td>84.5</td></tr><tr><td>XLNet*6</td><td>67.8</td><td>96.8</td><td>93.0/90.7</td><td>91.6/91.1</td><td>74.2/90.3</td><td>90.2/89.8</td><td>98.6</td><td>86.3</td><td>90.4</td><td>47.5</td><td>88.4</td></tr><tr><td>RoBERTa*7</td><td>67.8</td><td>96.7</td><td>92.3/89.8</td><td>92.2/91.9</td><td>74.3/90.2</td><td>90.8/90.2</td><td>98.9</td><td>88.2</td><td>89.0</td><td>48.7</td><td>88.5</td></tr><tr><td>Adv-RoBERTa*</td><td>68.0</td><td>96.8</td><td>93.1/90.8</td><td>92.4/92.2</td><td>74.8/90.3</td><td>91.1/90.7</td><td>98.8</td><td>88.7</td><td>89.0</td><td>50.1</td><td>88.8</td></tr><tr><td>StructBERTRoBERTa*</td><td>69.2</td><td>97.1</td><td>93.6/91.5</td><td>92.8/92.4</td><td>74.4/90.7</td><td>90.7/90.3</td><td>99.2</td><td>87.3</td><td>89.7</td><td>47.8</td><td>89.0</td></tr></table>
108
+
109
+ Table 1: Results of published models on the GLUE test set, which are scored by the GLUE evaluation server. The number below each task denotes the number of training examples. The state-of-the-art results are in bold. All the results are obtained from https://gluebenchmark.com/leaderboard (StructBERT submitted under a different model name ALICE). * indicates the ensemble model. Model references: ${}^{1}$ : (Devlin et al.,2018); ${}^{2}$ : (Phang et al.,2018); ${}^{3}$ : (Joshi et al., 2019); ${}^{4}$ : (Ratner et al.,2017); ${}^{5}$ : (Liu et al.,2019a); ${}^{6}$ : (Yang et al.,2019b); ${}^{7}$ : (Liu et al.,2019b).
110
+
111
+ # 3.1 GENERAL LANGUAGE UNDERSTANDING
112
+
113
+ # 3.1.1 GLUE BENCHMARK
114
+
115
+ The General Language Understanding Evaluation (GLUE) benchmark (Wang et al., 2018) is a collection of nine NLU tasks, covering textual entailment (RTE (Bentivogli et al., 2009) and MNLI (Williams et al., 2017)), question-answer entailment (QNLI (Wang et al., 2018)), paraphrase (MRPC (Dolan & Brockett, 2005)), question paraphrase (QQP), textual similarity (STS-B (Cer et al., 2017)), sentiment (SST-2 (Socher et al., 2013)), linguistic acceptability (CoLA), and Winograd Schema (WNLI (Levesque et al., 2012)).
116
+
117
+ On the GLUE benchmark, given the similarity of MRPC/RTE/STS-B to MNLI, we fine-tuned StructBERT on MNLI before training on MRPC/RTE/STS-B data for the respective tasks. This follows the two-stage transfer learning STILTs introduced in (Phang et al., 2018). For all the other tasks (i.e., QNLI, QQP, SST-2, CoLA and MNLI), we fine-tuned StructBERT for each single task only on its in-domain data.
118
+
119
+ Table 1 presents the results of published models on the GLUE test set obtained from the official benchmark evaluation server. Our StructBERTLarge ensemble surpassed all published models (excluding the RoBERTa and XLNet ensembles) on the average score, and performed the best among these models in six of the nine tasks. In the most popular MNLI task, our StructBERTLarge single model improved the best result by $0.3\% / 0.5\%$; since we fine-tuned MNLI only on its in-domain data, this improvement is entirely attributed to our new training objectives. The most significant improvement over BERT was observed on CoLA $(4.8\%)$, which may be due to the strong correlation between the word ordering task and the grammatical error correction task. In the SST-2 task, our model improved over BERT but performed worse than MT-DNN, which indicates that sentiment analysis based on single sentences benefits less from the word structural objective and the sentence structural objective.
120
+
121
+ <table><tr><td>Model</td><td>GPT</td><td>BERT</td><td>MT-DNN</td><td>SJRC</td><td>StructBERTLarge</td></tr><tr><td>Dev</td><td>-</td><td>90.1</td><td>91.4</td><td>-</td><td>92.2</td></tr><tr><td>Test</td><td>89.9</td><td>90.8</td><td>91.1</td><td>91.3</td><td>91.7</td></tr></table>
122
+
123
+ Table 2: Accuracy $(\%)$ on the SNLI dataset.
124
+
125
+ <table><tr><td rowspan="2">System</td><td colspan="2">Dev set</td><td colspan="2">Test set</td></tr><tr><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>Human</td><td>-</td><td>-</td><td>82.3</td><td>91.2</td></tr><tr><td>XLNet(single+DA) (Yang et al., 2019b)</td><td>88.9</td><td>94.5</td><td>89.9</td><td>95.0</td></tr><tr><td>BERT(ensemble+DA) (Devlin et al., 2018)</td><td>86.2</td><td>92.2</td><td>87.4</td><td>93.2</td></tr><tr><td>KT-NET(single) (Yang et al., 2019a)</td><td>85.1</td><td>91.7</td><td>85.9</td><td>92.4</td></tr><tr><td>BERT(single+DA) (Devlin et al., 2018)</td><td>84.2</td><td>91.1</td><td>85.1</td><td>91.8</td></tr><tr><td>QANet(ensemble+DA) (Yu et al., 2018)</td><td>-</td><td>-</td><td>84.5</td><td>90.5</td></tr><tr><td>StructBERTLarge (single)</td><td>85.2</td><td>92.0</td><td>-</td><td>-</td></tr><tr><td>StructBERTLarge (ensemble)</td><td>87.0</td><td>93.0</td><td>-</td><td>-</td></tr></table>
126
+
127
+ Table 3: SQuAD results. The StructBERTLarge ensemble consists of 10 systems that use different pre-training checkpoints and fine-tuning seeds.
128
+
129
+ With pre-training on larger corpora, the XLNet and RoBERTa ensembles outperformed all published models including our StructBERTLarge ensemble. To take advantage of the large data on which RoBERTa is trained, we continued pre-training with our two new objectives from the released RoBERTa model, yielding a model named StructBERTRoBERTa. At the time of model submission, our StructBERTRoBERTa ensemble, which was submitted under the name ALICE, achieved the best performance among all published models including RoBERTa and XLNet on the leaderboard, creating a new state-of-the-art result of $89.0\%$ on the average GLUE score. This demonstrates that the proposed objectives can improve language models beyond BERT.
130
+
131
+ # 3.1.2 SNLI
132
+
133
+ Natural Language Inference (NLI) is one of the important tasks in natural language understanding. The goal of this task is to test the ability of a model to reason about the semantic relationship between two sentences. To perform well on an NLI task, a model needs to capture the semantics of sentences, and thus infer the relationship between a pair of sentences: entailment, contradiction or neutral.
134
+
135
+ We evaluated our model on the most widely used NLI dataset: The Stanford Natural Language Inference (SNLI) Corpus (Bowman et al., 2015), which consists of 549,367/9,842/9,824 premise-hypothesis pairs in train/dev/test sets and target labels indicating their relations. We performed a grid search on the sets of parameters, and chose the model that performed best on the dev set.
136
+
137
+ Table 2 shows the results on the SNLI dataset of our model alongside other published models. StructBERT outperformed all existing systems on SNLI, creating a new state-of-the-art result of $91.7\%$ , which amounts to a $0.4\%$ absolute improvement over the previous state-of-the-art model SJRC and a $0.9\%$ absolute improvement over BERT. Since the network architecture of our model is identical to that of BERT, this improvement is entirely attributed to the new pre-training objectives, which justifies the effectiveness of the proposed tasks of word prediction and sentence prediction.
138
+
139
+ # 3.2 EXTRACTIVE QUESTION ANSWERING
140
+
141
+ SQuAD v1.1 is a popular machine reading comprehension dataset consisting of 100,000+ questions created by crowd workers on 536 Wikipedia articles (Rajpurkar et al., 2016). The goal of the task is to extract the right answer span from the corresponding paragraph given a question.
142
+
143
+ We fine-tuned our StructBERT language model on the SQuAD dataset for 3 epochs, and compared the result against the state-of-the-art methods on the official leaderboard, as shown in Table 3. We can see that even without any additional data augmentation (DA) techniques, the proposed StructBERT model was superior to all published models except XLNet+DA on the dev set. With data augmentation and a large corpus used during pre-training, XLNet+DA outperformed our StructBERT, which did not
144
+
145
+ <table><tr><td>Task</td><td>CoLA (Acc)</td><td>SST-2 (Acc)</td><td>MNLI (Acc)</td><td>SNLI (Acc)</td><td>QQP (Acc)</td><td>SQuAD (F1)</td></tr><tr><td>StructBERTBase</td><td>85.8</td><td>92.9</td><td>85.4</td><td>91.5</td><td>91.1</td><td>90.6</td></tr><tr><td>-word structure</td><td>81.7</td><td>92.7</td><td>85.2</td><td>91.6</td><td>90.7</td><td>90.3</td></tr><tr><td>-sentence structure</td><td>84.9</td><td>92.9</td><td>84.1</td><td>91.1</td><td>90.5</td><td>89.1</td></tr><tr><td>BERTBase</td><td>80.9</td><td>92.7</td><td>84.1</td><td>91.3</td><td>90.4</td><td>88.5</td></tr></table>
146
+
147
+ Table 4: Ablation over the pre-training objectives using the StructBERTBase architecture. Every result is the average score of 8 runs with different random seeds (the MNLI accuracy is the average score of the matched and mismatched settings).
148
+
149
+ use data augmentation or a large pre-training corpus. This demonstrates the effectiveness of the proposed pre-trained StructBERT in modeling the question-paragraph relationship for extractive question answering. Incorporating the word and sentence structures significantly improves the understanding ability in this fine-grained answer extraction task.
150
+
151
+ # 3.3 EFFECT OF DIFFERENT STRUCTURAL OBJECTIVES
152
+
153
+ We have demonstrated the strong empirical results of the proposed model on a variety of downstream tasks. In the StructBERT pre-training, the two new structural prediction tasks are the most important components. Therefore, we conducted an ablation study by removing one structural objective from pre-training at a time to examine how the two structural objectives influence the performance on various downstream tasks.
154
+
155
+ Results are presented in Table 4. From the table, we can see that: (1) the two structural objectives were both critical to most of the downstream tasks, except for the word structural objective in the SNLI task. Removing either the word or the sentence objective from pre-training always led to degraded performance on the downstream tasks. The StructBERT model with structural pre-training consistently outperformed the original BERT model, which shows the effectiveness of the proposed structural objectives. (2) For the sentence-pair tasks such as MNLI, SNLI, QQP and SQuAD, incorporating the sentence structural objective significantly improved the performance. This demonstrates the effect of inter-sentence structures learned by pre-training in understanding the relationship between sentences for downstream tasks. (3) For the single-sentence tasks such as CoLA and SST-2, the word structural objective played the most important role. Especially in the CoLA task, which is related to grammatical error correction, the improvement was over $5\%$ . The ability to reconstruct the order of words in pre-training helped the model better judge the acceptability of a single sentence.
156
+
157
+ We also studied the effect of both structural objectives during self-supervised pre-training. Figure 2 illustrates the loss and accuracy of word and sentence prediction over the number of pre-training steps for StructBERTBase and BERTBase. From the two sub-figures on top, it is observed that
158
+
159
+ ![](images/bb721398bae887b623bf5d6b3eee3b1305d4033656a68f72eabbaab92412f622.jpg)
160
+
161
+ ![](images/d71add1b96b1d37140a6d40a98b07010b93f9945f186ec3ced9c7c78f0f84168.jpg)
162
+
163
+ ![](images/932840e9b82ded024d72b94a5298a069fd24d97f5d255718ae2152cdabe721e3.jpg)
164
+ Figure 2: Loss and accuracy of word and sentence prediction over the number of pre-training steps
165
+
166
+ ![](images/ddf2163e1a731999fbbf01e03b30a23864ed47e85daf81d5aea8e8cdb6b486eb.jpg)
167
+
168
+ compared with BERT, the augmented shuffled token prediction in StructBERT's word structural objective had little effect on the loss and accuracy of masked token prediction. On the other hand, the integration of the simpler task of shuffled token prediction (lower loss and higher accuracy) provides StructBERT with the capability of word reordering. In contrast, the new sentence structural objective in StructBERT leads to a more challenging prediction task than that in BERT, as shown in the two figures at the bottom. This new pre-training objective enables StructBERT to exploit inter-sentence structures, which benefits sentence-pair downstream tasks.
169
+
170
+ # 4 RELATED WORK
171
+
172
+ # 4.1 CONTEXTUALIZED LANGUAGE REPRESENTATION
173
+
174
+ A word can have different semantics depending on its context. Contextualized word representation is considered to be an important part of modern NLP research, with various pre-trained language models (McCann et al., 2017; Peters et al., 2018; Radford et al., 2018; Devlin et al., 2018) emerging recently. ELMo (Peters et al., 2018) learns two unidirectional LMs based on long short-term memory networks (LSTMs). A forward LM reads the text from left to right, and a backward LM encodes the text from right to left. Following a similar idea to ELMo, OpenAI GPT (Radford et al., 2018) expands the unsupervised language model to a much larger scale by training on a giant collection of free text corpora. Different from ELMo, it builds upon a multi-layer Transformer (Vaswani et al., 2017) decoder, and uses a left-to-right Transformer to predict a text sequence word-by-word.
175
+
176
+ In contrast, BERT (Devlin et al., 2018) (as well as its robustly optimized version RoBERTa (Liu et al., 2019b)) employs a bidirectional Transformer encoder to fuse both the left and the right context, and introduces two novel pre-training tasks for better language understanding. We base our LM on the architecture of BERT, and further extend it by introducing word and sentence structures into pre-training tasks for deep language understanding.
177
+
178
+ # 4.2 WORD & SENTENCE ORDERING
179
+
180
+ The task of linearization aims to recover the original order of a shuffled sentence (Schmaltz et al., 2016). As part of a larger discussion on whether LSTMs capture syntactic phenomena, linearization has been standardized in a recent line of research as a method for isolating the performance of text-to-text generation models (Zhang & Clark, 2015). Recently, Transformers have emerged as a powerful architecture for learning the latent structure of language. For example, Bidirectional Transformers (BERT) have reduced the perplexity of the language modeling task. We revisit Elman's question by applying BERT to the word-ordering task, without any explicit syntactic approaches, and find that pre-trained language models with linearization are effective for various downstream tasks.
181
+
182
+ Many important downstream tasks such as STS and NLI (Wang et al., 2018) are based on understanding the relationship between two text sentences, which is not directly captured by language modeling. While BERT (Devlin et al., 2018) pre-trains a binarized next sentence prediction task to understand sentence relationships, we take one step further and treat it as a sentence ordering task. The goal of sentence ordering is to arrange a set of sentences into a coherent text in a clear and consistent manner, which can be viewed as a ranking problem (Chen et al., 2016). The task is general yet challenging, and is especially important for natural language generation (Reiter & Dale, 1997). Text should be organized according to the following properties: rhetorical coherence, topical relevancy, chronological sequence, and cause-effect. In this work, we focus on what is arguably the most basic characteristic of a sequence: its order. Most prior work on sentence ordering was part of the study of downstream tasks, such as multi-document summarization (Bollegala et al., 2010). We revisit this problem in the context of language modeling as a new sentence prediction task.
183
+
184
+ # 5 CONCLUSION
185
+
186
+ In this paper, we propose novel structural pre-training which incorporates word and sentence structures into BERT pre-training. A word structural objective and a sentence structural objective are introduced as two new pre-training tasks for deep understanding of natural language in different granularities. Experimental results demonstrate that the new StructBERT model can obtain new state-of-the-art
187
+
188
+ results in a variety of downstream tasks, including the popular GLUE benchmark, the SNLI Corpus and the SQuAD v1.1 question answering.
189
+
190
+ # REFERENCES
191
+
192
+ Luisa Bentivogli, Peter Clark, Ido Dagan, and Danilo Giampiccolo. The fifth pascal recognizing textual entailment challenge. In TAC, 2009.
193
+ Danushka Bollegala, Naoaki Okazaki, and Mitsuru Ishizuka. A bottom-up approach to sentence ordering for multi-document summarization. Information processing & management, 46(1): 89-109, 2010.
194
+ Samuel R Bowman, Gabor Angeli, Christopher Potts, and Christopher D Manning. A large annotated corpus for learning natural language inference. arXiv preprint arXiv:1508.05326, 2015.
195
+ Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez-Gazpio, and Lucia Specia. Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation. arXiv preprint arXiv:1708.00055, 2017.
196
+ Xinchi Chen, Xipeng Qiu, and Xuanjing Huang. Neural sentence ordering. arXiv preprint arXiv:1607.06952, 2016.
197
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
198
+ William B Dolan and Chris Brockett. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005), 2005.
199
+ Jeffrey L Elman. Finding structure in time. Cognitive science, 14(2):179-211, 1990.
200
+ Eva Hasler, Felix Stahlberg, Marcus Tomalin, Adrià de Gispert, and Bill Byrne. A comparison of neural models for word ordering. arXiv preprint arXiv:1708.01809, 2017.
201
+ Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016.
202
+ Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8): 1735-1780, 1997.
203
+ Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S Weld, Luke Zettlemoyer, and Omer Levy. Spanbert: Improving pre-training by representing and predicting spans. arXiv preprint arXiv:1907.10529, 2019.
204
+ Hector Levesque, Ernest Davis, and Leora Morgenstern. The winograd schema challenge. In Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning, 2012.
205
+ Xiaodong Liu, Pengcheng He, Weizhu Chen, and Jianfeng Gao. Multi-task deep neural networks for natural language understanding. arXiv preprint arXiv:1901.11504, 2019a.
206
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692, 2019b.
207
+ Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. Learned in translation: Contextualized word vectors. In Advances in Neural Information Processing Systems, pp. 6294-6305, 2017.
208
+ Tomáš Mikolov, Martin Karafiát, Lukáš Burget, Jan Černocký, and Sanjeev Khudanpur. Recurrent neural network based language model. In Eleventh annual conference of the international speech communication association, 2010.
209
+
210
+ Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. Deep contextualized word representations. arXiv preprint arXiv:1802.05365, 2018.
211
+ Jason Phang, Thibault Févry, and Samuel R Bowman. Sentence encoders on stilts: Supplementary training on intermediate labeled-data tasks. arXiv preprint arXiv:1811.01088, 2018.
212
+ Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. URL https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf, 2018.
213
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. Squad: 100,000+ questions for machine comprehension of text. arXiv preprint arXiv:1606.05250, 2016.
214
+ Alexander Ratner, Stephen H Bach, Henry Ehrenberg, Jason Fries, Sen Wu, and Christopher Ré. Snorkel: Rapid training data creation with weak supervision. Proceedings of the VLDB Endowment, 11(3):269-282, 2017.
215
+ Ehud Reiter and Robert Dale. Building applied natural language generation systems. Natural Language Engineering, 3(1):57-87, 1997.
216
+ Allen Schmaltz, Alexander M Rush, and Stuart M Shieber. Word ordering without syntax. arXiv preprint arXiv:1604.08633, 2016.
217
+ Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pp. 1631-1642, 2013.
218
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
219
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461, 2018.
220
+ Adina Williams, Nikita Nangia, and Samuel R Bowman. A broad-coverage challenge corpus for sentence understanding through inference. arXiv preprint arXiv:1704.05426, 2017.
221
+ Yonghui Wu, Mike Schuster, Zhifeng Chen, Quoc V Le, Mohammad Norouzi, Wolfgang Macherey, Maxim Krikun, Yuan Cao, Qin Gao, Klaus Macherey, et al. Google's neural machine translation system: Bridging the gap between human and machine translation. arXiv preprint arXiv:1609.08144, 2016.
222
+ An Yang, Quan Wang, Jing Liu, Kai Liu, Yajuan Lyu, Hua Wu, Qiaoqiao She, and Sujian Li. Enhancing pre-trained language representations with rich knowledge for machine reading comprehension. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 2346-2357, 2019a.
223
+ Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V Le. Xlnet: Generalized autoregressive pretraining for language understanding. arXiv preprint arXiv:1906.08237, 2019b.
224
+ Adams Wei Yu, David Dohan, Minh-Thang Luong, Rui Zhao, Kai Chen, Mohammad Norouzi, and Quoc V Le. Qanet: Combining local convolution with global self-attention for reading comprehension. arXiv preprint arXiv:1804.09541, 2018.
225
+ Yue Zhang and Stephen Clark. Discriminative syntax-based word ordering for text generation. Computational linguistics, 41(3):503-538, 2015.
226
+ Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In Proceedings of the IEEE international conference on computer vision, pp. 19-27, 2015.
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0de855850ee2bb19a9bf92b44f1f4781d94946b38f43fa8cd49bde77cb16905
3
+ size 296981
structbertincorporatinglanguagestructuresintopretrainingfordeeplanguageunderstanding/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da390b6b0312ff87079bd572de6bdd8f04a74778b22abd2fbc2227c325ba513e
3
+ size 288693
structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52b48c6d82a543090c62d8ee2636d68d3b74adb40c4353b48e91cf1243e97dd8
3
+ size 74339
structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee47dc0379582116cf7d92e671e03ab9a36117cce791f1c5ddb2700def256e43
3
+ size 88398
structpoolstructuredgraphpoolingviaconditionalrandomfields/137ab202-0cdf-41c6-bce9-004525975664_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:12b53a768b8fbde930f622f1958ab5f952d6fe6bcfea9d921acb03ea110328be
3
+ size 429557
structpoolstructuredgraphpoolingviaconditionalrandomfields/full.md ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # STRUCTPOOL: STRUCTURED GRAPH POOLING VIA CONDITIONAL RANDOM FIELDS
2
+
3
+ Hao Yuan
4
+
5
+ Department of Computer Science & Engineering
6
+ Texas A&M University
7
+
8
+ College Station, TX 77843, USA
9
+
10
+ hao.yuan@tamu.edu
11
+
12
+ Shuiwang Ji
13
+
14
+ Department of Computer Science & Engineering
15
+ Texas A&M University
16
+
17
+ College Station, TX 77843, USA
18
+
19
+ sji@tamu.edu
20
+
21
+ # ABSTRACT
22
+
23
+ Learning high-level representations for graphs is of great importance for graph analysis tasks. In addition to graph convolution, graph pooling is an important but less explored research area. In particular, most existing graph pooling techniques do not consider the graph structural information explicitly. We argue that such information is important and develop a novel graph pooling technique, known as STRUCTPOOL, in this work. We consider graph pooling as a node clustering problem, which requires the learning of a cluster assignment matrix. We propose to formulate it as a structured prediction problem and employ conditional random fields to capture the relationships among the assignments of different nodes. We also generalize our method to incorporate graph topological information in designing the Gibbs energy function. Experimental results on multiple datasets demonstrate the effectiveness of our proposed STRUCTPOOL.
24
+
25
+ # 1 INTRODUCTION
26
+
27
+ Graph neural networks have achieved the state-of-the-art results for multiple graph tasks, such as node classification (Veličković et al., 2018; Gao & Ji, 2019b; Gao et al., 2018) and link prediction (Zhang & Chen, 2018; Cai & Ji, 2020). These results demonstrate the effectiveness of graph neural networks to learn node representations. However, graph classification tasks also require learning good graph-level representations. Since pooling operations are shown to be effective in many image and NLP tasks, it is natural to investigate pooling techniques for graph data (Yu & Koltun, 2016; Springenberg et al., 2014). Recent work extends the global sum/average pooling operations to graph models by simply summing or averaging all node features (Atwood & Towsley, 2016; Simonovsky & Komodakis, 2017). However, these trivial global pooling operations may lose important features and ignore structural information. Furthermore, global pooling is not hierarchical, so we cannot apply it where multiple pooling operations are required, as in Graph U-Net (Gao & Ji, 2019a). Several advanced graph pooling methods, such as SORTPOOL (Zhang et al., 2018), TOPKPOOL (Gao & Ji, 2019a), DIFFPOOL (Ying et al., 2018), and SAGPOOL (Lee et al., 2019), are recently proposed and achieve promising performance on graph classification tasks. However, none of them explicitly models the relationships among different nodes and thus may ignore important structural information. We argue that such information is important and should be explicitly captured in graph pooling.
28
+
29
+ In this work, we propose a novel graph pooling technique, known as the STRUCTPOOL, that formulates graph pooling as a structured prediction problem. Following DIFFPOOL (Ying et al., 2018), we consider graph pooling as a node clustering problem, and each cluster corresponds to a node in the new graph after pooling. Intuitively, two nodes with similar features should have a higher probability of being assigned to the same cluster. Hence, the assignment of a given node should depend on both the input node features and the assignments of other nodes. We formulate this as a structured prediction problem and employ conditional random fields (CRFs) (Lafferty et al., 2001) to capture such high-order structural relationships among the assignments of different nodes. In addition, we generalize our method by incorporating the graph topological information so that our method can control the clique set in our CRFs. We employ the mean field approximation to compute the assignments and describe how to incorporate it in graph networks. Then the networks can be
30
+
31
+ trained in an end-to-end fashion. Experiments show that our proposed STRUCTPOOL outperforms existing methods significantly and consistently. We also show that STRUCTPOOL incurs acceptable computational cost given its superior performance.
32
+
33
+ # 2 BACKGROUND AND RELATED WORK
34
+
35
+ # 2.1 GRAPH CONVOLUTIONAL NETWORKS
36
+
37
+ A graph can be represented by its adjacency matrix and node features. Formally, for a graph $G$ consisting of $n$ nodes, its topology information can be represented by an adjacency matrix $A \in \{0,1\}^{n \times n}$ , and the node features can be represented as $X \in \mathbb{R}^{n \times c}$ assuming each node has a $c$ -dimensional feature vector. Deep graph neural networks (GNNs) learn feature representations for different nodes using these matrices (Gilmer et al., 2017). Several approaches are proposed to investigate deep GNNs, and they generally follow a neighborhood information aggregation scheme (Gilmer et al., 2017; Xu et al., 2019; Hamilton et al., 2017; Kipf & Welling, 2017; Velicković et al., 2018). In each step, the representation of a node is updated by aggregating the representations of its neighbors. Graph Convolutional Networks (GCNs) are popular variants of GNNs and are inspired by first-order graph Laplacian methods (Kipf & Welling, 2017). The graph convolution operation is formally defined as:
38
+
39
+ $$
40
+ X _ {i + 1} = f \left(D ^ {- \frac {1}{2}} \hat {A} D ^ {- \frac {1}{2}} X _ {i} P _ {i}\right), \tag {1}
41
+ $$
42
+
43
+ where $\hat{A} = A + I$ is used to add self-loops to the adjacency matrix, $D$ denotes the diagonal node degree matrix to normalize $\hat{A}$ , $X_{i} \in \mathbb{R}^{n \times c_{i}}$ are the node features after $i^{th}$ graph convolution layer, $P_{i} \in \mathbb{R}^{c_{i} \times c_{i+1}}$ is a trainable matrix to perform feature transformation, and $f(\cdot)$ denotes a non-linear activation function. Then $X_{i} \in \mathbb{R}^{n \times c_{i}}$ is transformed to $X_{i+1} \in \mathbb{R}^{n \times c_{i+1}}$ where the number of nodes remains the same. A similar form of GCNs proposed in (Zhang et al., 2018) can be expressed as:
44
+
45
+ $$
46
+ X _ {i + 1} = f \left(D ^ {- 1} \hat {A} X _ {i} P _ {i}\right). \tag {2}
47
+ $$
48
+
49
+ It differs from the GCNs in Equation (1) by performing a different normalization, and is theoretically a closer approximation to the Weisfeiler-Lehman algorithm (Weisfeiler & Lehman, 1968). Hence, in our models, we use the latter version of GCNs in Equation (2).
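+
+ As an illustration, Equation (2) can be implemented with a few dense matrix products. A minimal NumPy sketch with toy inputs (a practical implementation would use sparse operations):
+
+ ```python
+ import numpy as np
+
+ def gcn_layer(A, X, P, f=lambda z: np.maximum(z, 0)):   # f = ReLU
+     A_hat = A + np.eye(A.shape[0])                      # add self-loops
+     D_inv = np.diag(1.0 / A_hat.sum(axis=1))            # inverse degree matrix
+     return f(D_inv @ A_hat @ X @ P)                     # Equation (2)
+
+ A = np.array([[0., 1.], [1., 0.]])    # a toy 2-node graph
+ X = np.random.randn(2, 4)             # n = 2 nodes, c = 4 features
+ P = np.random.randn(4, 8)             # trainable transform, c -> c'
+ X_next = gcn_layer(A, X, P)           # shape: (2, 8)
+ ```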
50
+
51
+ # 2.2 GRAPH POOLING
52
+
53
+ Several advanced pooling techniques have been proposed recently for graph models, such as SORTPOOL, TOPKPOOL, DIFFPOOL, and SAGPOOL, and achieve strong performance on multiple benchmark datasets. SORTPOOL (Zhang et al., 2018), TOPKPOOL (Gao & Ji, 2019a), and SAGPOOL (Lee et al., 2019) all learn to select important nodes from the original graph and use these nodes to build a new graph. They share a similar idea: learning a sorting vector from node representations using GCNs, which indicates the importance of different nodes. Then only the top $k$ most important nodes are selected to form a new graph while the other nodes are discarded. However, the discarded nodes may contain important features, and this information is lost during pooling. DIFFPOOL (Ying et al., 2018) treats graph pooling as a node clustering problem. A cluster of nodes from the original graph is merged to form a new node in the new graph. DIFFPOOL proposes to perform GCNs on node features to obtain a node clustering assignment matrix. Intuitively, the cluster assignment of a given node should depend on the cluster assignments of other nodes. However, DIFFPOOL does not explicitly consider such high-order structural relationships, which we believe are important for graph pooling. In this work, we propose a novel structured graph pooling technique, known as STRUCTPOOL, for effectively learning high-level graph representations. Different from existing methods, our method explicitly captures high-order structural relationships between different nodes via conditional random fields. In addition, our method is generalized by incorporating graph topological information $A$ to control which node pairs are included in our CRFs.
54
+
+ # 2.3 INTEGRATING CRFs WITH GNNS
+
+ Recent work (Gao et al., 2019; Qu et al., 2019; Ma et al., 2019) investigates how to combine CRFs with GNNs. CGNF (Ma et al., 2019) is a GNN architecture for graph node classification which explicitly models a joint probability of the entire set of node labels via CRFs and performs inference via dynamic programming. In addition, GMNN (Qu et al., 2019) focuses on semi-supervised object classification tasks and models the joint distribution of object labels conditioned on object attributes using CRFs; it proposes a pseudolikelihood variational EM framework for model learning and inference. Recent work (Gao et al., 2019) integrates CRFs with GNNs by proposing a CRF layer that encourages similar nodes to have similar hidden features, so that similarity information is preserved explicitly. All of these methods are designed for node classification tasks and incorporate CRFs in different ways. Different from existing work, our STRUCTPOOL is proposed for the graph pooling operation, and the energy is optimized via mean-field approximation. All operations in our STRUCTPOOL can be realized by GNN operations, so STRUCTPOOL can be easily used in any GNN and trained in an end-to-end fashion.
+
+ # 3 STRUCTURED GRAPH POOLING
+
+ # 3.1 GRAPH POOLING VIA NODE CLUSTERING
+
+ Even though pooling techniques have been shown to facilitate the training of deep models and to improve their performance significantly in many image and NLP tasks (Yu & Koltun, 2016; Springenberg et al., 2014), local pooling operations cannot be directly applied to graph tasks, because there is no spatial locality among graph nodes. Global max/average pooling operations can be employed for graph tasks, but they may lead to information loss, since they trivially reduce the representation to a single vector. A graph $G$ with $n$ nodes can be represented by a feature matrix $X \in \mathbb{R}^{n \times c}$ and an adjacency matrix $A \in \{0,1\}^{n \times n}$. Graph pooling operations aim at reducing the number of graph nodes and learning new representations. Suppose that graph pooling generates a new graph $\tilde{G}$ with $k$ nodes, whose representation matrices are denoted as $\tilde{X} \in \mathbb{R}^{k \times \tilde{c}}$ and $\tilde{A} \in \{0,1\}^{k \times k}$. The goal of graph pooling is then to learn the relationship between $X$, $A$ and $\tilde{X}$, $\tilde{A}$. In this work, we consider graph pooling via node clustering. In particular, the nodes of the original graph $G$ are assigned to $k$ different clusters, and each cluster is transformed into a new node of the new graph $\tilde{G}$. The clustering assignments can be represented as an assignment matrix $M \in \mathbb{R}^{n \times k}$. For hard assignments, $m_{i,j} \in \{0,1\}$ denotes whether node $i$ in graph $G$ belongs to cluster $j$. For soft assignments, $m_{i,j} \in [0,1]$ denotes the probability that node $i$ in graph $G$ belongs to cluster $j$, with $\sum_j m_{i,j} = 1$. Then the new graph $\tilde{G}$ can be computed as
+
+ $$
+ \tilde{X} = M^T X, \quad \tilde{A} = g\left(M^T A M\right), \tag{3}
+ $$
+
+ where $g(\cdot)$ is applied element-wise, with $g(\tilde{a}_{i,j}) = 1$ if $\tilde{a}_{i,j} > 0$ and $g(\tilde{a}_{i,j}) = 0$ otherwise.
+
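+ For illustration, Equation (3) amounts to two matrix products; a dense-tensor sketch (our notation, not the authors' code) is:
+
+ ```python
+ import torch
+
+ def coarsen(A, X, M):
+     # M: (n, k) assignment matrix; rows are one-hot for hard assignments
+     # or probability vectors for soft assignments.
+     X_new = M.T @ X                      # cluster features M^T X
+     A_new = (M.T @ A @ M > 0).float()    # g(M^T A M): clusters that share an edge
+     return A_new, X_new
+ ```
+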
+ # 3.2 LEARNING CLUSTERING ASSIGNMENTS VIA CONDITIONAL RANDOM FIELDS
+
+ Intuitively, node features describe the properties of different nodes, so nodes with similar features should have a higher chance of being assigned to the same cluster. That is, for any node in the original graph $G$, its cluster assignment should not only depend on the node feature matrix $X$ but also be conditioned on the cluster assignments of the other nodes. We believe such high-order structural information is useful for graph pooling and should be explicitly captured while learning clustering assignments. To this end, we propose a novel structured graph pooling technique, known as STRUCTPOOL, which generates the assignment matrix by considering both the feature matrix $X$ and the relationships between the assignments of different nodes. We propose to formulate this as a conditional random field (CRF) problem. A CRF models a set of random variables with a Markov random field (MRF), conditioned on a global observation (Lafferty et al., 2001). We formally define $Y = \{Y_1, \dots, Y_n\}$ as a random field, where each random variable $Y_i \in \{1, \dots, k\}$ indicates the cluster to which node $i$ is assigned. Here the feature representation $X$ is treated as the global observation. We build a graphical model on $Y$, denoted $G'$. Then the pair $(Y, X)$ can be defined as a CRF, characterized by the Gibbs distribution
+
+ $$
+ P(Y \mid X) = \frac{1}{Z(X)} \exp\left(-\sum_{c \in C_{G'}} \psi_c\left(Y_c \mid X\right)\right), \tag{4}
+ $$
+
+ where $c$ denotes a clique, $C_{G'}$ is the set of cliques in $G'$, $Z(X)$ is the partition function, and $\psi_c(\cdot)$ is the potential function induced by $c$ (Krähenbühl & Koltun, 2011; Lafferty et al., 2001). Then the Gibbs energy function for an assignment $y = \{y_1, \dots, y_n\}$ of all variables can be written as
+
+ $$
+ E(y \mid X) = \sum_{c \in C_{G'}} \psi_c\left(y_c \mid X\right). \tag{5}
+ $$
+
+ ![](images/58ac12c189ce86eb4045d7091fb0aee903ea47b4f6119156fdef55fbd6fe6381.jpg)
+ Figure 1: Illustration of our proposed STRUCTPOOL. Given a graph with 6 nodes, the color of each node represents its features. We perform graph pooling to obtain a new graph with $k = 4$ nodes. The unary energy matrix is obtained by multiple GCN layers using $X$ and $A$. The pairwise energy is measured by an attention matrix using the node features $X$ and the topology information $A$. Then, by iterative updating, the mean-field approximation yields the most probable assignment matrix. Finally, we obtain the new graph with 4 nodes, represented by $\tilde{X}$ and $\tilde{A}$.
+
+ Finding the optimal assignment is equivalent to maximizing $P(Y \mid X)$, which in turn is equivalent to minimizing the Gibbs energy $E(y \mid X)$.
+
+ # 3.3 GIBBS ENERGY WITH TOPOLOGY INFORMATION
+
+ Now we define the clique set $C_{G'}$ in $G'$. Similar to the existing CRF model (Krähenbühl & Koltun, 2011), we include all unary cliques in $C_{G'}$, since we need to measure the energy of assigning each node. For pairwise cliques, we generalize our method to control the pairwise clique set by incorporating the graph topology information $A$. We consider $\ell$-hop connectivity based on $A$ to define the pairwise cliques, which builds pairwise relationships between different nodes. Let $A^{\ell} \in \{0,1\}^{n \times n}$ represent the $\ell$-hop connectivity of graph $G$, where $a_{i,j}^{\ell} = 1$ indicates that nodes $i$ and $j$ are reachable from each other in $G$ within $\ell$ hops. We then include a pairwise clique $(i,j)$ in $C_{G'}$ if $a_{i,j}^{\ell} = 1$. Altogether, the Gibbs energy for a cluster assignment $y$ can be written as
+
+ $$
+ E(y) = \sum_i \psi_u\left(y_i\right) + \sum_{i \neq j} \psi_p\left(y_i, y_j\right) a_{i,j}^{\ell}, \tag{6}
+ $$
+
+ where $\psi_u(y_i)$ represents the unary energy of assigning node $i$ to cluster $y_i$. In addition, $\psi_p(y_i, y_j)$ is the pairwise energy, which indicates the energy of assigning nodes $i$ and $j$ to clusters $y_i$ and $y_j$, respectively. Note that we drop the conditioning on $X$ in Equation (6) for simplicity. If $\ell$ is large enough, our CRF is equivalent to a dense CRF. If $\ell$ is equal to 1, we have $A^{\ell} = A$, so that only the 1-hop information in the adjacency matrix is considered. Both types of energy can be obtained directly from neural networks (Zheng et al., 2015). Given the global observation $X$ and the topology information $A$, we employ multiple graph convolution layers to obtain the unary energy $\Psi_u \in \mathbb{R}^{n \times k}$. Existing work on image tasks (Krähenbühl & Koltun, 2011) proposes to employ Gaussian kernels to measure the pairwise energy. However, due to its computational inefficiency, we cannot directly apply this approach to our CRF model. The pairwise energy proposed in (Krähenbühl & Koltun, 2011) can be written as
+
+ $$
+ \psi_p\left(y_i, y_j\right) = \mu\left(y_i, y_j\right) \sum_{m=1}^{K} w^{(m)} k^{(m)}\left(x_i, x_j\right), \tag{7}
+ $$
+
+ where $k^{(m)}(\cdot, \cdot)$ represents the $m^{th}$ Gaussian kernel, $x_i$ is the feature vector of node $i$ in $X$, $w^{(m)}$ denotes learnable weights, and $\mu(y_i, y_j)$ is a compatibility function that models the compatibility between different assignment pairs. However, it is computationally inefficient to accurately compute the outputs of Gaussian kernels, especially for graph data where the feature vectors are high-dimensional. Hence, in this work, we propose to employ an attention matrix as the measurement of pairwise energy. Intuitively, Gaussian kernels indicate how strongly different feature vectors are connected with each other. Similarly, the attention matrix reflects the similarities between different feature vectors, but at a significantly lower computational cost. Specifically, each feature vector $x_i$ attends to every other feature vector $x_j$ whose pair $(i,j)$ exists in the clique set $C_{G'}$. Hence, the pairwise energy can be obtained by
+
+ $$
+ \psi_p\left(y_i, y_j\right) = \mu\left(y_i, y_j\right) \frac{x_i^T x_j}{\sum_{k \neq i} x_i^T x_k}. \tag{8}
+ $$
+
+ It can be efficiently computed by matrix multiplication and normalization. Minimizing the Gibbs energy in Equation (6) yields the most probable cluster assignments for a given graph $G$. However, exact minimization is intractable; hence we follow the iterative mean-field approximation of Krähenbühl & Koltun (2011) to obtain the most probable cluster assignments. Altogether, the steps of our proposed STRUCTPOOL are summarized in Algorithm 1 below. All operations in our proposed STRUCTPOOL can be implemented as GNN operations, and hence STRUCTPOOL can be employed in any deep graph model and trained in an end-to-end fashion. The unary energy matrix can be obtained by stacking several GCN layers, and the normalization operations (steps 3 and 9 in Algorithm 1) are equivalent to softmax operations. All other steps can be computed by matrix computations. It is noteworthy that the compatibility function $\mu(y_i, y_j)$ can be implemented as a trainable matrix $\mathcal{N} \in \mathbb{R}^{k \times k}$ and learned automatically during training. Hence, no prior domain knowledge is required to design the compatibility function. We illustrate our proposed STRUCTPOOL in Figure 1, where we perform STRUCTPOOL on a graph $G$ with 6 nodes and obtain a new graph $\tilde{G}$ with 4 nodes.
+
+ Algorithm 1 STRUCTPOOL
+ 1: Given a graph $G$ with $n$ nodes represented by $X \in \mathbb{R}^{n \times c}$ and $A \in \{0,1\}^{n \times n}$, the goal is to obtain $\tilde{G}$ with $k$ nodes, represented by $\tilde{X} \in \mathbb{R}^{k \times \tilde{c}}$ and $\tilde{A} \in \{0,1\}^{k \times k}$. The $\ell$-hop connectivity matrix $A^{\ell}$ can be easily obtained from $A$.
+ 2: Perform GCNs to obtain the unary energy matrix $\Psi_u \in \mathbb{R}^{n \times k}$.
+ 3: Initialize $Q(i,j) = \frac{1}{Z_i} \exp(\Psi_u(i,j))$ for all $1 \leq i \leq n$ and $1 \leq j \leq k$.
+ 4: while not converged do
+ 5: Calculate the attention map $W$ with $w_{i,j} = \frac{x_i^T x_j}{\sum_{m \neq i} x_i^T x_m} a_{i,j}^{\ell}$ for all $i \neq j$, $1 \leq i, j \leq n$.
+ 6: Message passing: $\tilde{Q}(i,j) = \sum_{m \neq i} w_{i,m} Q(m,j)$.
+ 7: Compatibility transform: $\hat{Q}(i,j) = \sum_m \mu(m,j) \tilde{Q}(i,m)$.
+ 8: Local update: $\bar{Q}(i,j) = \Psi_u(i,j) - \hat{Q}(i,j)$.
+ 9: Perform normalization: $Q(i,j) = \frac{1}{Z_i} \exp(\bar{Q}(i,j))$ for all $i$ and $j$.
+ 10: end while
+ 11: For soft assignments, the assignment matrix is $M = \text{softmax}(Q)$.
+ 12: For hard assignments, the assignment matrix is $M = \text{argmax}(Q)$ applied row-wise.
+ 13: Obtain the new graph $\tilde{G}$ with $\tilde{X} = M^T X$ and $\tilde{A} = g(M^T A M)$.
+
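+ For concreteness, a minimal dense-tensor sketch of Algorithm 1 in PyTorch is given below. It is an illustrative reconstruction, not the released implementation: `psi_u` would come from stacked GCN layers, `mu` is the trainable compatibility matrix, and the attention normalization assumes positive similarities.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def struct_pool(A, X, psi_u, mu, l=1, iters=5):
+     # A: (n, n) adjacency, X: (n, c) features,
+     # psi_u: (n, k) unary energies, mu: (k, k) compatibility matrix.
+     n = A.shape[0]
+     eye = torch.eye(n)
+     # Step 1: l-hop connectivity, excluding self-pairs.
+     A_l = (torch.linalg.matrix_power(A + eye, l) > 0).float() * (1 - eye)
+     # Step 5: attention map, masked by the l-hop pairwise cliques.
+     S = X @ X.T
+     W = (S / (S * (1 - eye)).sum(dim=1, keepdim=True)) * A_l
+     Q = F.softmax(psi_u, dim=1)              # Step 3: initialization
+     for _ in range(iters):
+         Q_tilde = W @ Q                      # Step 6: message passing
+         Q_hat = Q_tilde @ mu                 # Step 7: compatibility transform
+         Q = F.softmax(psi_u - Q_hat, dim=1)  # Steps 8-9: update + normalization
+     # Steps 12-13: hard assignments and the new graph.
+     M = F.one_hot(Q.argmax(dim=1), num_classes=Q.shape[1]).float()
+     return (M.T @ A @ M > 0).float(), M.T @ X
+ ```
+
+ For soft assignments, one would return the normalized `Q` itself as the assignment matrix instead of taking the row-wise argmax.
+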
+ # 3.4 COMPUTATIONAL COMPLEXITY ANALYSIS
+
+ We theoretically analyze the computational efficiency of our proposed STRUCTPOOL. Since computational efficiency is especially important for large-scale graph datasets, we assume that $n > k, c, \tilde{c}$. The computational complexity of one GCN layer is $\mathcal{O}(n^3 + n^2 c + n\tilde{c}) \approx \mathcal{O}(n^3)$. Assuming we employ $i$ layers of GCNs to obtain the unary energy, their computational cost is $\mathcal{O}(in^3)$. Assuming there are $m$ iterations in our updating algorithm, its computational complexity is $\mathcal{O}(m(n^2c + n^2k + nk^2)) \approx \mathcal{O}(mn^3)$. The final step of computing $\tilde{A}$ and $\tilde{X}$ takes $\mathcal{O}(nkc + n^2k + nk^2) \approx \mathcal{O}(n^3)$. Altogether, the complexity of STRUCTPOOL is $\mathcal{O}((m + i)n^3)$, which is close to the complexity of stacking $m + i$ layers of GCNs.
+
+ Table 1: Classification results on six benchmark datasets. Note that none of the deep methods outperforms the traditional method WL on COLLAB. We believe the reason is that the graphs in COLLAB have only simple, single-layer structures, which deep models are too complex to capture.
+
+ <table><tr><td rowspan="2">Method</td><td colspan="6">Dataset</td></tr><tr><td>ENZYMES</td><td>D&amp;D</td><td>COLLAB</td><td>PROTEINS</td><td>IMDB-B</td><td>IMDB-M</td></tr><tr><td>GRAPHLET</td><td>41.03</td><td>74.85</td><td>64.66</td><td>72.91</td><td>-</td><td>-</td></tr><tr><td>SHORTEST-PATH</td><td>42.32</td><td>78.86</td><td>59.10</td><td>76.43</td><td>-</td><td>-</td></tr><tr><td>WL</td><td>53.43</td><td>78.34</td><td>78.61</td><td>74.68</td><td>-</td><td>-</td></tr><tr><td>PATCHYSAN</td><td>-</td><td>76.27</td><td>72.60</td><td>75.00</td><td>71.00</td><td>45.23</td></tr><tr><td>DCNN</td><td>-</td><td>58.09</td><td>52.11</td><td>61.29</td><td>49.06</td><td>33.49</td></tr><tr><td>DGK</td><td>-</td><td>-</td><td>73.09</td><td>71.68</td><td>66.96</td><td>44.55</td></tr><tr><td>ECC</td><td>53.50</td><td>72.54</td><td>67.79</td><td>72.65</td><td>-</td><td>-</td></tr><tr><td>GRAPHSAGE</td><td>54.25</td><td>75.42</td><td>68.25</td><td>70.48</td><td>-</td><td>-</td></tr><tr><td>SET2SET</td><td>60.15</td><td>78.12</td><td>71.75</td><td>74.29</td><td>-</td><td>-</td></tr><tr><td>DGCNN</td><td>57.12</td><td>79.37</td><td>73.76</td><td>75.54</td><td>70.03</td><td>47.83</td></tr><tr><td>DIFFPOOL</td><td>62.53</td><td>80.64</td><td>75.48</td><td>76.25</td><td>-</td><td>-</td></tr><tr><td>STRUCTPOOL</td><td>63.83</td><td>84.19</td><td>74.22</td><td>80.36</td><td>74.70</td><td>52.47</td></tr></table>
+
+ # 3.5 DEEP GRAPH NETWORKS FOR GRAPH CLASSIFICATION
+
+ In this section, we investigate graph classification tasks, which require both good node-level and good graph-level representations. Most state-of-the-art deep graph classification models share a similar pipeline: they first produce node representations using GNNs, then perform pooling operations to obtain high-level representations, and finally employ fully-connected layers to perform classification. Note that the high-level representation can be either a single vector or a group of $k$ vectors. For a set of graphs with different node numbers and a pre-defined $k$, our proposed STRUCTPOOL produces $k$ vectors for each graph. Hence, our method can be easily generalized and coupled with any deep graph classification model. Specifically, our model for graph classification is developed based on DGCNN (Zhang et al., 2018). Given any input graph, our model first employs several layers of GCNs (Equation (2)) to aggregate features from neighbors and learn node representations. Next, we perform one STRUCTPOOL layer to obtain $k$ vectors for the graph. Finally, 1D convolutional layers and fully-connected layers are used to classify the graph.
+
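+ Chaining the sketches above gives a rough picture of this pipeline; the final line is a deliberately simplified stand-in for the 1D convolutional and fully-connected classifier head, and `params` is a hypothetical container of trainable tensors:
+
+ ```python
+ import torch
+
+ def classify(A, X, params):
+     X = gcn_layer(A, X, params["P1"])                  # GCN layers, Equation (2)
+     X = gcn_layer(A, X, params["P2"])
+     psi_u = gcn_layer(A, X, params["P_unary"])         # unary energies, shape (n, k)
+     A_k, X_k = struct_pool(A, X, psi_u, params["mu"])  # pooled graph with k nodes
+     return X_k.flatten() @ params["W_out"]             # stand-in for conv/FC layers
+ ```
+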
+ # 4 EXPERIMENTAL STUDIES
+
+ # 4.1 DATASETS AND EXPERIMENTAL SETTINGS
+
+ We evaluate our proposed STRUCTPOOL on eight benchmark datasets, including five bioinformatics datasets, ENZYMES, PTC, MUTAG, PROTEINS (Borgwardt et al., 2005), and D&D (Dobson & Doig, 2003), and three social network datasets, COLLAB (Yanardag & Vishwanathan, 2015b), IMDB-B, and IMDB-M (Yanardag & Vishwanathan, 2015a). Most of them are relatively large-scale and hence suitable for evaluating deep graph models. We report their statistics and properties in Supplementary Table 6, and refer to Supplementary Section A for the experimental settings.
+
+ We compare our method with several state-of-the-art deep GNN methods. PATCHYSAN (Niepert et al., 2016) learns node representations and a canonical node ordering to perform classification. DCNN (Atwood & Towsley, 2016) learns multi-scale substructure features by diffusion graph convolutions and performs global sum pooling. DGK (Yanardag & Vishwanathan, 2015a) models latent representations for sub-structures in graphs, similar to learning word embeddings. ECC (Simonovsky & Komodakis, 2017) performs GCNs conditioned on both node features and edge information and uses global sum pooling before the final classifier. GRAPHSAGE (Hamilton et al., 2017) is an inductive framework which generates node embeddings by sampling and aggregating features from local neighbors, and it employs global mean pooling. SET2SET (Vinyals et al., 2015) proposes an aggregation method to replace the global pooling operations in deep graph networks. DGCNN (Zhang et al., 2018) proposes a pooling strategy named SORTPOOL, which sorts all nodes by a learned score and selects the first $k$ nodes to form a new graph.
+
+ Table 2: Comparisons between different pooling techniques under the same framework.
+
+ <table><tr><td rowspan="2">Method</td><td colspan="6">Dataset</td></tr><tr><td>ENZYMES</td><td>D&amp;D</td><td>COLLAB</td><td>PROTEINS</td><td>IMDB-B</td><td>IMDB-M</td></tr><tr><td>SUM POOL</td><td>47.33</td><td>78.72</td><td>69.45</td><td>76.26</td><td>51.69</td><td>42.76</td></tr><tr><td>SORTPOOL</td><td>52.83</td><td>80.60</td><td>73.92</td><td>76.83</td><td>70.00</td><td>46.26</td></tr><tr><td>TOPK POOL</td><td>53.67</td><td>81.71</td><td>73.34</td><td>77.47</td><td>72.80</td><td>49.00</td></tr><tr><td>DIFFPOOL</td><td>60.33</td><td>80.94</td><td>71.78</td><td>77.74</td><td>72.40</td><td>50.13</td></tr><tr><td>SAGPOOL</td><td>64.17</td><td>81.03</td><td>73.28</td><td>78.82</td><td>73.40</td><td>51.13</td></tr><tr><td>STRUCTPOOL</td><td>63.83</td><td>84.19</td><td>74.22</td><td>80.36</td><td>74.70</td><td>52.47</td></tr></table>
+
+ DIFFPOOL (Ying et al., 2018) is built on the GRAPHSAGE architecture but uses the proposed differentiable pooling. Note that for most of these methods, pooling operations are employed to obtain graph-level representations before the final classifier. In addition, we compare our STRUCTPOOL with three graph kernels: Graphlet (Shervashidze et al., 2009), Shortest-path (Borgwardt & Kriegel, 2005), and the Weisfeiler-Lehman subtree kernel (WL) (Weisfeiler & Lehman, 1968).
+
+ # 4.2 CLASSIFICATION RESULTS
+
+ We evaluate our proposed method on six benchmark datasets and compare it with several state-of-the-art approaches. The results are reported in Table 1, where the best results are shown in bold and the second-best results are underlined. For our STRUCTPOOL, we perform 10-fold cross-validation and report the average accuracy for each dataset; the 10-fold splitting is the same as in DGCNN. For all competing methods, the results are taken from existing work (Ying et al., 2018; Zhang et al., 2018). We observe that our STRUCTPOOL obtains the best performance on 5 out of 6 benchmark datasets. On these 5 datasets, the classification results of our method are significantly better than those of all competing methods, including the advanced models DGCNN and DIFFPOOL; notably, our model outperforms the second-best performance by an average of $3.58\%$. In addition, the graph kernel method WL obtains the best performance on the COLLAB dataset, and none of the deep models achieves similar performance there; our model obtains competitive performance compared with the second-best model. This is because many graphs in COLLAB have only simple structures, which deep models may be too complex to capture.
+
+ # 4.3 COMPARISONS OF DIFFERENT POOLING METHODS
+
+ To demonstrate the effectiveness of our proposed pooling technique, we compare different pooling techniques under the same network framework. Specifically, we compare our STRUCTPOOL with global sum pooling, SORTPOOL, TOPKPOOL, DIFFPOOL, and SAGPOOL. All pooling methods are employed in the network framework introduced in Section 3.5, and the same 10-fold cross-validation splits from DGCNN are used for all pooling methods. We report the results in Table 2, with the best results shown in bold. Our method achieves the best performance on five of the six datasets and significantly outperforms all competing pooling techniques. On ENZYMES, our result remains competitive, as SAGPOOL outperforms our proposed method by only $0.34\%$. These observations demonstrate that the structural information in graphs is useful for graph pooling and that the relationships between different nodes should be modeled explicitly.
+
+ # 4.4 STUDY OF COMPUTATIONAL COMPLEXITY
+
+ As mentioned in Section 3.4, our proposed STRUCTPOOL has $\mathcal{O}((m + i)n^3)$ computational complexity. The complexity of DIFFPOOL is $\mathcal{O}(jn^3)$ if we assume it employs $j$ layers of GCNs to obtain the assignment matrix. In our experiments, $i$ is usually set to 2 or 3, which is much smaller than $n$.
+
+ Table 3: The prediction accuracy for different iteration numbers $m$.
+
+ <table><tr><td>Dataset</td><td>m = 1</td><td>m = 3</td><td>m = 5</td><td>m = 10</td></tr><tr><td>ENZYMES</td><td>62.67</td><td>63.00</td><td>63.83</td><td>63.50</td></tr><tr><td>D&amp;D</td><td>82.82</td><td>83.08</td><td>83.59</td><td>84.19</td></tr><tr><td>PROTEINS</td><td>80.09</td><td>80.00</td><td>80.18</td><td>80.18</td></tr></table>
+
+ We conduct experiments to show how the iteration number $m$ affects the prediction accuracy, and the results are reported in Table 3. Note that we employ the dense CRF form for all values of $m$. We observe that the performance generally increases as $m$ increases, especially for the large-scale dataset D&D, and that $m = 5$ is a good trade-off between time complexity and prediction performance. Notably, our method can outperform the other approaches even when $m = 1$. Furthermore, we evaluate the running time of our STRUCTPOOL and compare it with DIFFPOOL. For 500 graphs from the large-scale dataset D&D, we set $i = j = 3$ and measure the average time needed to perform pooling for each graph. DIFFPOOL takes 0.042 seconds, while our STRUCTPOOL takes 0.049, 0.053, and 0.058 seconds for $m = 1$, $m = 3$, and $m = 5$, respectively. Even though our STRUCTPOOL has a somewhat higher computational cost, it is still reasonable and acceptable given its superior performance.
+
+ # 4.5 EFFECTS OF TOPOLOGY INFORMATION
+
+ Next, we conduct experiments to show how the topology information $A^{\ell}$ affects the prediction performance. We evaluate our STRUCTPOOL with different $\ell$ values and report the results in Table 4. Note that when $\ell$ is large enough, our STRUCTPOOL considers the pairwise relationships between all nodes and is equivalent to the dense CRF. For the datasets IMDB-M and PROTEINS, the prediction accuracies generally increase as $\ell$ grows: with larger $\ell$, more pairwise relationships are considered by the model, so it is reasonable to obtain better performance. For the dataset IMDB-B, the results remain similar across different $\ell$, and even $\ell = 1$ yields performance competitive with the dense CRF; it is possible that 1-hop pairwise relationships are sufficient to learn good embeddings for this type of graph. Overall, the dense CRF consistently produces promising results and is a proper choice in practice.
+
+ Table 4: The prediction accuracy using different $A^\ell$ in STRUCTPOOL.
+
+ <table><tr><td>Dataset</td><td>l = 1</td><td>l = 5</td><td>l = 10</td><td>l = 15</td><td>DENSE</td></tr><tr><td>IMDB-B</td><td>74.60</td><td>74.40</td><td>74.30</td><td>74.70</td><td>74.70</td></tr><tr><td>IMDB-M</td><td>51.53</td><td>51.67</td><td>52.00</td><td>51.96</td><td>52.47</td></tr><tr><td>PROTEINS</td><td>79.73</td><td>79.61</td><td>79.83</td><td>80.36</td><td>80.18</td></tr></table>
+
+ # 4.6 GRAPH ISOMORPHISM NETWORKS WITH STRUCTPOOL
+
+ Recently, Graph Isomorphism Networks (GINs) have been proposed and shown to be more powerful than traditional GNNs (Xu et al., 2019).
+
+ Table 5: Comparisons with Graph Isomorphism Networks.
+
+ <table><tr><td>Dataset</td><td>PTC</td><td>IMDB-B</td><td>MUTAG</td><td>COLLAB</td><td>IMDB-M</td></tr><tr><td>GINS</td><td>64.60</td><td>75.10</td><td>89.40</td><td>80.20</td><td>52.30</td></tr><tr><td>OURS</td><td>73.46</td><td>78.50</td><td>93.59</td><td>84.06</td><td>54.60</td></tr></table>
+
+ To demonstrate the effectiveness of our STRUCTPOOL and show its generalizability, we build models based on GINs and evaluate their performance. Specifically, we employ GINs to learn node representations and perform one layer of the dense form of our STRUCTPOOL, followed by 1D convolutional layers and fully-connected layers as the classifier. The results are reported in Table 5, where we employ the same 10-fold splitting as GINs (Xu et al., 2019) and the GIN results are taken from their released results. These five datasets include both bioinformatics and social media data, as well as both small-scale and large-scale data. Incorporating our proposed STRUCTPOOL into GINs consistently and significantly improves the prediction performance, leading to a promising average improvement of $4.52\%$ in prediction accuracy.
+
+ # 5 CONCLUSIONS
+
+ Graph pooling is an appealing way to learn good graph-level representations, and several advanced pooling techniques have been proposed. However, none of the existing graph pooling techniques explicitly considers the relationships between different nodes. We propose a novel graph pooling technique, known as STRUCTPOOL, which is based on conditional random fields. We cast graph pooling as a node clustering problem and employ a CRF to model the relationships between the assignments of different nodes. In addition, we generalize our method by incorporating the graph topology information, so that our method can control the pairwise clique set in our CRFs. Finally, we evaluate our proposed STRUCTPOOL on several benchmark datasets, and our method achieves new state-of-the-art results on five out of six datasets.
+
+ # ACKNOWLEDGEMENT
+
+ This work was supported in part by National Science Foundation grants DBI-1661289 and IIS-1908198.
+
+ # REFERENCES
+
+ James Atwood and Don Towsley. Diffusion-convolutional neural networks. In Advances in Neural Information Processing Systems, pp. 1993-2001, 2016.
+ Karsten M Borgwardt and Hans-Peter Kriegel. Shortest-path kernels on graphs. In Fifth IEEE International Conference on Data Mining (ICDM'05). IEEE, 2005.
+ Karsten M Borgwardt, Cheng Soon Ong, Stefan Schönauer, SVN Vishwanathan, Alex J Smola, and Hans-Peter Kriegel. Protein function prediction via graph kernels. Bioinformatics, 21(suppl 1):i47-i56, 2005.
+ Lei Cai and Shuiwang Ji. A multi-scale approach for graph link prediction. In Thirty-Fourth AAAI Conference on Artificial Intelligence, 2020.
+ Paul D Dobson and Andrew J Doig. Distinguishing enzyme structures from non-enzymes without alignments. Journal of Molecular Biology, 330(4):771-783, 2003.
+ Hongchang Gao, Jian Pei, and Heng Huang. Conditional random field enhanced graph convolutional neural networks. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 276-284. ACM, 2019.
+ Hongyang Gao and Shuiwang Ji. Graph U-Nets. In International Conference on Machine Learning, pp. 2083-2092, 2019a.
+ Hongyang Gao and Shuiwang Ji. Graph representation learning via hard and channel-wise attention networks. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 741-749, 2019b.
+ Hongyang Gao, Zhengyang Wang, and Shuiwang Ji. Large-scale learnable graph convolutional networks. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pp. 1416-1424, 2018.
+ Justin Gilmer, Samuel S Schoenholz, Patrick F Riley, Oriol Vinyals, and George E Dahl. Neural message passing for quantum chemistry. In Proceedings of the 34th International Conference on Machine Learning, volume 70, pp. 1263-1272. JMLR.org, 2017.
+ Will Hamilton, Zhitao Ying, and Jure Leskovec. Inductive representation learning on large graphs. In Advances in Neural Information Processing Systems, pp. 1024-1034, 2017.
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of the 3rd International Conference on Learning Representations, 2014.
+ Thomas N Kipf and Max Welling. Semi-supervised classification with graph convolutional networks. In Proceedings of the International Conference on Learning Representations, 2017.
+ Philipp Krähenbühl and Vladlen Koltun. Efficient inference in fully connected CRFs with Gaussian edge potentials. In Advances in Neural Information Processing Systems, pp. 109-117, 2011.
+ John Lafferty, Andrew McCallum, and Fernando CN Pereira. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In International Conference on Machine Learning, pp. 282-289, 2001.
+ Junhyun Lee, Inyeop Lee, and Jaewoo Kang. Self-attention graph pooling. In International Conference on Machine Learning, pp. 3734-3743, 2019.
+ Tengfei Ma, Cao Xiao, Junyuan Shang, and Jimeng Sun. CGNF: Conditional graph neural fields, 2019. URL https://openreview.net/forum?id=ryxMX2R9YQ.
+ Mathias Niepert, Mohamed Ahmed, and Konstantin Kutzkov. Learning convolutional neural networks for graphs. In International Conference on Machine Learning, pp. 2014-2023, 2016.
+ Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in PyTorch. In Proceedings of the International Conference on Learning Representations, 2017.
+ Meng Qu, Yoshua Bengio, and Jian Tang. GMNN: Graph Markov neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pp. 5241-5250, Long Beach, California, USA, 09-15 Jun 2019. PMLR.
+ Nino Shervashidze, SVN Vishwanathan, Tobias Petri, Kurt Mehlhorn, and Karsten Borgwardt. Efficient graphlet kernels for large graph comparison. In Artificial Intelligence and Statistics, pp. 488-495, 2009.
+ Martin Simonovsky and Nikos Komodakis. Dynamic edge-conditioned filters in convolutional neural networks on graphs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3693-3702, 2017.
+ Jost Tobias Springenberg, Alexey Dosovitskiy, Thomas Brox, and Martin Riedmiller. Striving for simplicity: The all convolutional net. In Proceedings of the International Conference on Learning Representations, 2014.
+ Petar Veličković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Liò, and Yoshua Bengio. Graph attention networks. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=rJXMpikCZ.
+ Oriol Vinyals, Samy Bengio, and Manjunath Kudlur. Order matters: Sequence to sequence for sets. In International Conference on Learning Representations, 2015.
+ Boris Weisfeiler and Andrei A Lehman. A reduction of a graph to a canonical form and an algebra arising during this reduction. Nauchno-Technicheskaya Informatsia, 2(9):12-16, 1968.
+ Keyulu Xu, Weihua Hu, Jure Leskovec, and Stefanie Jegelka. How powerful are graph neural networks? In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=ryGs6iA5Km.
+ Pinar Yanardag and SVN Vishwanathan. Deep graph kernels. In Proceedings of the 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 1365-1374. ACM, 2015a.
+ Pinar Yanardag and SVN Vishwanathan. A structural smoothing framework for robust graph comparison. In Advances in Neural Information Processing Systems, pp. 2134-2142, 2015b.
+ Zhitao Ying, Jiaxuan You, Christopher Morris, Xiang Ren, Will Hamilton, and Jure Leskovec. Hierarchical graph representation learning with differentiable pooling. In Advances in Neural Information Processing Systems, pp. 4800-4810, 2018.
+ Fisher Yu and Vladlen Koltun. Multi-scale context aggregation by dilated convolutions. In Proceedings of the International Conference on Learning Representations, 2016.
+ Muhan Zhang and Yixin Chen. Link prediction based on graph neural networks. In Advances in Neural Information Processing Systems, pp. 5165-5175, 2018.
+ Muhan Zhang, Zhicheng Cui, Marion Neumann, and Yixin Chen. An end-to-end deep learning architecture for graph classification. In AAAI, pp. 4438-4445, 2018.
+ Shuai Zheng, Sadeep Jayasumana, Bernardino Romera-Paredes, Vibhav Vineet, Zhizhong Su, Dalong Du, Chang Huang, and Philip HS Torr. Conditional random fields as recurrent neural networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1529-1537, 2015.
+
+ # A APPENDIX
+
+ # A.1 DATASETS AND EXPERIMENTAL SETTINGS
+
+ Table 6: Statistics and properties of eight benchmark datasets.
+
+ <table><tr><td></td><td colspan="4">Dataset</td></tr><tr><td></td><td>ENZYMES</td><td>D&amp;D</td><td>COLLAB</td><td>PROTEINS</td></tr><tr><td># of Edges (avg)</td><td>124.20</td><td>1431.3</td><td>2457.78</td><td>72.82</td></tr><tr><td># of Nodes (avg)</td><td>32.63</td><td>284.32</td><td>74.49</td><td>39.06</td></tr><tr><td># of Graphs</td><td>600</td><td>1178</td><td>5000</td><td>1113</td></tr><tr><td># of Classes</td><td>6</td><td>2</td><td>3</td><td>2</td></tr><tr><td colspan="5">Dataset</td></tr><tr><td></td><td>IMDB-B</td><td>IMDB-M</td><td>PTC</td><td>MUTAG</td></tr><tr><td># of Edges (avg)</td><td>96.53</td><td>65.94</td><td>14.69</td><td>19.79</td></tr><tr><td># of Nodes (avg)</td><td>19.77</td><td>13.00</td><td>14.30</td><td>17.93</td></tr><tr><td># of Graphs</td><td>1000</td><td>1500</td><td>344</td><td>188</td></tr><tr><td># of Classes</td><td>2</td><td>3</td><td>2</td><td>2</td></tr></table>
+
+ We report the statistics and properties of the eight benchmark datasets in Supplementary Table 6. We implement our models in PyTorch (Paszke et al., 2017) and conduct experiments on one GeForce GTX 1080 Ti GPU. The models are trained by stochastic gradient descent using the Adam optimizer (Kingma & Ba, 2014). For the models built on DGCNN (Zhang et al., 2018) in Sections 4.2, 4.3, 4.4, and 4.5, we employ GCNs to obtain the node features and the unary energy matrix. All experiments in these sections use 10-fold cross-validation, and we report the average results; the 10-fold splits are exactly the same as in DGCNN (Zhang et al., 2018). As non-linear activation functions, we employ tanh for the GCNs and ReLU for the 1D convolutional layers. For the models built on GINs in Section 4.6, we employ GINs to learn the node features and the unary energy, the 10-fold splits are exactly the same as in GINs, and we employ ReLU as the non-linear activation for all layers. For all models, 1D convolutional layers and fully-connected layers are used after our STRUCTPOOL, and hard clustering assignments are employed in all experiments.
+
+ # A.2 EFFECTS OF PAIRWISE ENERGY
+
+ Table 7: Comparison with the baseline which excludes pairwise energy.
+
+ <table><tr><td>Dataset</td><td>ENZYMES</td><td>D&amp;D</td><td>COLLAB</td><td>PROTEINS</td><td>IMDB-B</td><td>IMDB-M</td></tr><tr><td>BASELINE</td><td>60.83</td><td>81.30</td><td>70.58</td><td>78.18</td><td>72.40</td><td>50.13</td></tr><tr><td>OURS</td><td>63.83</td><td>84.19</td><td>74.22</td><td>80.36</td><td>74.70</td><td>52.47</td></tr></table>
+
+ We conduct experiments to show the importance of the pairwise energy. If the pairwise energy is removed, the relationships between the assignments of different nodes are no longer explicitly considered, and the method becomes similar to DIFFPOOL. We therefore compare our method with a baseline that removes the pairwise energy; the experimental results are reported in Table 7. The network framework is the same as introduced in Section 3.5, and the same 10-fold cross-validation splits from DGCNN are used. Our proposed method consistently and significantly outperforms the baseline without pairwise energy, which indicates the importance and effectiveness of incorporating pairwise energy and considering high-order relationships between different node assignments.
+
+ # A.3 STUDY OF HIERARCHICAL NETWORK STRUCTURE
+
+ To demonstrate how the network depth and multiple pooling layers affect the prediction performance, we conduct experiments to evaluate different hierarchical network structures. We first define a network block containing two GCN layers and one STRUCTPOOL layer. We then compare three different network settings: 1 block, 2 blocks, and 3 blocks, each followed by the final classifier.
+
+ Table 8: Comparison with different hierarchical network structures.
+
+ <table><tr><td>Dataset</td><td>1 BLOCK</td><td>2 BLOCKS</td><td>3 BLOCKS</td></tr><tr><td>PROTEINS</td><td>79.73</td><td>77.42</td><td>74.95</td></tr><tr><td>D&amp;D</td><td>81.87</td><td>83.59</td><td>81.63</td></tr></table>
+
+ The results are reported in Table 8. For the dataset PROTEINS, we observe that the network with one block obtains better performance than the deeper networks. We believe the main reason is that PROTEINS is a small-scale dataset with an average of 39.06 nodes per graph: a relatively simple network is powerful enough to learn its data distribution, while stacking multiple GCN and pooling layers may lead to serious overfitting. For the dataset D&D, the network with 2 blocks performs better than the one with 1 block: since D&D is relatively large-scale, stacking 2 blocks increases the capacity of the network and hence improves performance. However, going very deep, e.g., stacking 3 blocks, again causes overfitting.
+
+ # A.4 STUDY OF GRAPH POOLING RATE
+
+ Table 9: Comparison with different pooling rates.
+
+ <table><tr><td></td><td>r = 0.1</td><td>r = 0.3</td><td>r = 0.5</td><td>r = 0.7</td><td>r = 0.9</td></tr><tr><td>k</td><td>91</td><td>160</td><td>241</td><td>331</td><td>503</td></tr><tr><td>ACC</td><td>80.77</td><td>81.53</td><td>81.53</td><td>81.97</td><td>80.68</td></tr></table>
+
+ We follow DGCNN (Zhang et al., 2018) in selecting the number of clusters $k$. Specifically, we use a pooling rate $r \in (0,1)$ to control $k$: $k$ is set to the smallest integer such that $r \times 100\%$ of the graphs in the current dataset have at most $k$ nodes. As suggested in DGCNN, $r = 0.9$ is generally a proper choice for bioinformatics datasets and $r = 0.6$ for social network datasets. In addition, we conduct experiments to show the performance with respect to different $r$ values. We set $r = 0.1, 0.3, 0.5, 0.7, 0.9$ and evaluate the performance on the large-scale bioinformatics dataset D&D, whose average number of nodes is 284.32 and whose maximum number of nodes is 5748. The results are reported in Table 9, where the first row shows the different pooling rates, the second row reports the corresponding $k$ values, and the final row shows the results. For simplicity, we employ the network structure with 1 block and a final classifier (as defined in Section A.3). We observe that the performance drops when $r$ (and hence $k$) is either very large or very small, and that the model obtains competitive performance when $r$ lies in a proper range, for example, $r \in [0.3, 0.7]$ for dataset D&D.
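+
+ As a small illustration of this heuristic (a hypothetical helper, not part of any released code), $k$ can be derived from the distribution of graph sizes:
+
+ ```python
+ import numpy as np
+
+ def choose_k(node_counts, r=0.9):
+     # Smallest integer k such that r*100% of the graphs have at most k nodes.
+     return int(np.ceil(np.percentile(node_counts, 100 * r)))
+ ```
+
+ Applied to the D&D node counts, this should reproduce values close to the second row of Table 9, e.g., $k = 241$ at $r = 0.5$.
+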
structpoolstructuredgraphpoolingviaconditionalrandomfields/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a36eaf52ab345628c98104867c81a2ae37d26b469909d17a02dad820086f77a
+ size 344561
structpoolstructuredgraphpoolingviaconditionalrandomfields/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d10fa9667c1e4079782f180f58d69a1c6d1342d8a7eda9274093fbe1e03ac1d1
+ size 446273
structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ae30c7e606e724463194d1bf11d1fa7e52d4e7bb246c0c6ec5939efc47acdcd
+ size 104796
structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:614678b33b82fb727010757539373b169044da77f3cbb9ab74040fa4baf13903
+ size 131537
structuredobjectawarephysicspredictionforvideomodelingandplanning/faa820dc-e76d-498e-8695-ab0555120424_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2a92e5352aeff4d75634cd2443fad8f3aedc12be8e7d11fa0586ebff0b513b2
+ size 1152625
structuredobjectawarephysicspredictionforvideomodelingandplanning/full.md ADDED
@@ -0,0 +1,380 @@
+ # STRUCTURED OBJECT-AWARE PHYSICS PREDICTION FOR VIDEO MODELING AND PLANNING
+
+ Jannik Kossen\*$^{1}$, Karl Stelzner\*$^{2}$, Marcel Hussing$^{3}$, Claas Voelcker$^{3}$ & Kristian Kersting$^{2}$
+
+ $^{1}$ Department of Physics and Astronomy, Heidelberg University
+ $^{1}$ kossen@stud.uni-heidelberg.de
+ $^{2,3}$ Department of Computer Science, TU Darmstadt
+ $^{2}$ {stelzner,kersting}@cs.tu-darmstadt.de
+ $^{3}$ {marcel.hussing, c.voelcker}@stud.tu-darmstadt.de
+
+ # ABSTRACT
+
+ When humans observe a physical system, they can easily locate objects, understand their interactions, and anticipate future behavior. For computers, however, learning such models from videos in an unsupervised fashion is an unsolved research problem. In this paper, we present STOVE, a novel state-space model for videos, which explicitly reasons about objects and their positions, velocities, and interactions. It is constructed by combining an image model and a dynamics model in a compositional manner and improves on previous work by reusing the dynamics model for inference, which accelerates and regularizes training. STOVE predicts videos with convincing physical behavior over thousands of timesteps, outperforms previous unsupervised models, and even approaches the performance of supervised baselines. We further demonstrate the strength of our model as a simulator for sample-efficient model-based control in a task with heavily interacting objects.
+
+ # 1 INTRODUCTION
+
+ Obtaining structured knowledge about the world from unstructured, noisy sensory input is a key challenge in artificial intelligence. Of particular interest is the problem of identifying objects from visual input and understanding their interactions. One longstanding approach to this is the idea of vision as inverse graphics (Grenander, 1976), which postulates a data-generating graphics process and phrases vision as posterior inference in the induced distribution. Despite its intuitive appeal, vision as inference has remained largely intractable in practice due to the high-dimensional and multimodal nature of the inference problem. Recently, however, probabilistic models based on deep neural networks have made promising advances in this area. By composing conditional distributions parameterized by neural networks, highly expressive yet structured models have been built. At the same time, advances in general approximate inference, particularly variational techniques, have put the inference problem for these models within reach (Zhang et al., 2019).
+
+ Based on these advances, a number of probabilistic models for unsupervised scene understanding in single images have recently been proposed. The structured nature of approaches such as AIR (Eslami et al., 2016), MONet (Burgess et al., 2019), or IODINE (Greff et al., 2019) provides two key advantages over unstructured image models such as variational autoencoders (Kingma & Welling, 2014) or generative adversarial networks (Goodfellow et al., 2014). First, it allows for the specification of inductive biases, such as spatial consistency of objects, which constrain the model and act as regularization. Second, it enables the use of semantically meaningful latent variables, such as object positions, which may be used for downstream reasoning tasks.
+
+ Building such a structured model for videos instead of individual images is the natural next challenge. Not only could such a model be used in more complex domains, such as reinforcement learning, but the additional redundancy in the data can even simplify and regularize the object detection problem (Kosiorek et al., 2018). To this end, the notion of temporal consistency may be leveraged as an additional inductive bias, guiding the model to desirable behavior. In situations where interactions between objects are prevalent, understanding and explicitly modeling these interactions in an object-centric state space is valuable for obtaining good predictive models (Watters et al., 2017). Existing works in this area, such as SQAIR (Kosiorek et al., 2018), DDPAE (Hsieh et al., 2018), R-NEM (Van Steenkiste et al., 2018), and COBRA (Watters et al., 2019), have explored these concepts, but have not demonstrated realistic long-term video predictions on par with supervised approaches to modeling physics.
+
+ ![](images/cb59835af68051c91d32ebb04fe5aef936306b7872a7326fdebbf4964b5f661e.jpg)
+ Figure 1: Overview of STOVE's architecture. (Center left) At time $t$, the input image $x_t$ is processed by an LSTM to obtain a proposal distribution over object states $q(z_t \mid x_t)$. (Top) A separate proposal $q(z_t \mid z_{t-1})$ is obtained by propagating the previous state $z_{t-1}$ using the dynamics model. (Center) The multiplication of both proposal distributions yields the final variational distribution $q(z_t \mid z_{t-1}, x_t)$. (Right) We sample $z_t$ from this distribution to evaluate the generative distribution $p(z_t \mid z_{t-1}) p(x_t \mid z_t)$, where $p(z_t \mid z_{t-1})$ shares means - but not variances - with $q(z_t \mid z_{t-1})$, and $p(x_t \mid z_t)$ can be obtained by direct evaluation of $x_t$ in the sum-product networks. Not shown is the dependence on $x_{t-1}$ in the inference routine, which allows for the inference of velocities. (Best viewed in color.)
+
+ To push the limits of unsupervised learning of physical interactions, we propose STOVE, a structured, object-aware video model. With STOVE, we combine image and physics modeling into a single state-space model which explicitly reasons about object positions and velocities. It is trained end-to-end on pure video data in a self-supervised fashion and learns to detect objects, to model their interactions, and to predict future states and observations. To facilitate learning via variational inference in this model, we provide a novel inference architecture which reuses the learned generative physics model in the variational distribution. As we will demonstrate, our model generates convincing rollouts over hundreds of time steps, outperforms other video modeling approaches, and approaches the performance of the supervised baseline, which has access to the ground-truth object states.
+
+ Moving beyond unsupervised learning, we also demonstrate how STOVE can be employed for model-based reinforcement learning (RL). Model-based approaches to RL have long been viewed as a potential remedy to the often prohibitive sample complexity of model-free RL, but obtaining learned models of sufficient quality has proven difficult in practice (Sutton & Barto, 2011). By conditioning state predictions on actions and adding reward predictions to our dynamics predictor, we extend our model to the RL setting, allowing it to be used for search or planning. Our empirical evidence shows that an actor based on Monte-Carlo tree search (MCTS) (Coulom, 2007) on top of our model is competitive with model-free approaches such as Proximal Policy Optimization (PPO) (Schulman et al., 2017), while only requiring a fraction of the samples.
+
+ ![](images/80e374ba8bdd88b77c65c4ff5cee6af1b6fa7d122851eab37f752f72ccb3df1c.jpg)
+ ![](images/43a24489b4452ffc7843b5b67f41f5feb99bc3bc00802506ee22126652331eaa.jpg)
+ Figure 2: (Left) Depiction of the graphical model underlying STOVE. Black arrows denote the generative mechanism and red arrows the inference procedure. The variational distribution $q(z_t \mid z_{t-1}, x_t, x_{t-1})$ is formed by combining predictions from the dynamics model $p(z_t \mid z_{t-1})$ and the object detection network $q(z_t \mid x_t)$. For the RL domain, our approach is extended by action conditioning and reward prediction. (Right) Components of $z_t^o$ and the corresponding variational distributions: $q(z_{t,\text{pos}}^o \mid z_{t-1}) \cdot q(z_{t,\text{pos}}^o \mid x_t)$; $q(z_{t,\text{velo}}^o \mid z_{t-1}) \cdot q(z_{t,\text{velo}}^o \mid x_t, x_{t-1})$; $q(z_{t,\text{size}}^o \mid x_t)$; and $q(z_{t,\text{latent}}^o \mid z_{t-1})$. Note that the velocities are estimated based on the change in positions between timesteps, inducing a dependency on $x_{t-1}$.
+
+ We proceed by introducing the two main components of STOVE: a structured image model and a dynamics model. We show how to perform joint inference and training, as well as how to extend the model to the RL setting. We then present our experimental evaluation, before touching on further related work and concluding.
+
+ # 2 STRUCTURED OBJECT-AWARE VIDEO MODELING
+
+ We approach the task of modeling a video with frames $x_1, \ldots, x_T$ from a probabilistic perspective, assuming a sequence of Markovian latent states $z_1, \ldots, z_T$, which decompose into the properties of a fixed number $O$ of objects, i.e., $z_t = (z_t^1, \ldots, z_t^O)$. In the spirit of compositionality, we propose to specify and train such a model by explicitly combining a dynamics prediction model $p(z_{t+1} \mid z_t)$ and a scene model $p(x_t \mid z_t)$. This yields a state-space model which can be trained on pure video data, using variational inference and an approximate posterior distribution $q(z \mid x)$. Our model differs from previous work that also follows this methodology, most notably SQAIR and DDPAE, in three major ways:
+
+ - We propose a more compact architecture for the variational distribution $q(z \mid x)$, which reuses the dynamics model $p(z_{t+1} \mid z_t)$, and avoids the costly double recurrence across time and objects which was present in previous work.
+ - We parameterize the dynamics model using a graph neural network, taking advantage of the decomposed nature of the latent state $z$.
+ - Instead of treating each $z_t^o$ as an arbitrary latent code, we explicitly reserve the first six slots of this vector for the object's position, size, and velocity, each in $x, y$ direction, and use this information for the dynamics prediction task. We write $z_t^o = (z_{t,\text{pos}}^o, z_{t,\text{size}}^o, z_{t,\text{velo}}^o, z_{t,\text{latent}}^o)$.
+
+ We begin by briefly introducing the individual components before discussing how they are combined to form our state-space model. Fig. 1 visualises the computational flow of STOVE's inference and generative routines; Fig. 2 (left) specifies the underlying graphical model.
+
+ # 2.1 OBJECT-BASED MODELING OF IMAGES USING SUM-PRODUCT ATTEND-INFER-REPEAT
+
+ A variety of object-centric image models have recently been proposed, many of which are derivatives of attend-infer-repeat (AIR) (Eslami et al., 2016). AIR postulates that each image consists of a set of objects, each of which occupies a rectangular region in the image, specified by positional parameters $z_{\text{where}}^o = (z_{\text{pos}}^o, z_{\text{size}}^o)$ . The visual content of each object is described by a latent code $z_{\text{what}}^o$ . By decoding $z_{\text{what}}^o$ with a neural network and rendering the resulting image patches in the prescribed location, a generative model $p(x \mid z)$ is obtained. Inference is accomplished using a recurrent neural network, which outputs distributions over the latent objects $q(z^o \mid x)$ , attending to one object at a time. AIR is also capable of handling varying numbers of objects, using an additional set of latent variables.
+
+ Sum-Product Attend-Infer-Repeat (SuPAIR) (Stelzner et al., 2019) utilizes sum-product networks (SPNs) instead of a decoder network to directly model the distribution over object appearances. The tractable inference capabilities of the SPNs used in SuPAIR allow for the exact and efficient computation of $p(x \mid z_{\text{where}})$ , effectively integrating out the appearance parameters $z_{\text{what}}$ analytically. This has been shown to drastically accelerate learning, as the reduced inference workload significantly lowers the variance of the variational objective. Since the focus of SuPAIR on interpretable object parameters fits our goal of building a structured video model, we apply it as our image model $p(x_t \mid z_t)$ . Similarly, we use a recurrent inference network as in SuPAIR to model $q(z_{t,\text{where}} \mid x_t)$ . For details on SuPAIR, we refer to Stelzner et al. (2019).
+
+ # 2.2 MODELING PHYSICAL INTERACTIONS USING GRAPH NEURAL NETWORKS
+
+ In order to successfully capture complex dynamics, the state transition distribution $p(z_{t+1} \mid z_t) = p(z_{t+1}^1, \ldots, z_{t+1}^O \mid z_t^1, \ldots, z_t^O)$ needs to be parameterized using a flexible, non-linear estimator. A critical property that should be maintained in the process is permutation invariance, i.e., the output should not depend on the order in which objects appear in the vector $z_t$ . This type of function is well captured by graph neural networks, cf. (Santoro et al., 2017), which posit that the output should depend on the sum of pairwise interactions between objects. Graph neural networks have been extensively used for modeling physical processes in supervised scenarios (Battaglia et al., 2016; 2018; Sanchez-Gonzalez et al., 2018; Zhou et al., 2018).
76
+
77
+ Following this line of work, we build a dynamics model of the basic form
78
+
79
+ $$
80
+ \hat{z}_{t+1,\text{pos}}^{o},\, \hat{z}_{t+1,\text{velo}}^{o},\, \hat{z}_{t+1,\text{latent}}^{o} = f\left(g\left(z_{t}^{o}\right) + \sum_{o' \neq o} \alpha\left(z_{t}^{o}, z_{t}^{o'}\right) h\left(z_{t}^{o}, z_{t}^{o'}\right)\right) \tag{1}
81
+ $$
82
+
83
+ where $f, g, h, \alpha$ are functions parameterized by dense neural networks; $\alpha$ is an attention mechanism that outputs a scalar, allowing the network to focus on specific object pairs. We assume a constant prior over the object sizes, i.e., $\hat{z}_{t+1,\text{size}}^o = z_{t,\text{size}}^o$ . The full state transition distribution is then given by the Gaussian $p(z_{t+1}^o \mid z_t) = \mathcal{N}(\hat{z}_{t+1}^o, \sigma)$ , with a fixed $\sigma$ .
84
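+
+ As a concrete illustration, the following is a minimal PyTorch sketch of the dynamics core in Eq. 1. The module names mirror $f$, $g$, $h$, and $\alpha$ in the equation; the hidden sizes and layer counts are illustrative placeholders, not the exact architecture, which is specified layer by layer in Appendix C.2.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class InteractionDynamics(nn.Module):
+     """Sketch of Eq. 1: self-dynamics g, pairwise interactions h
+     weighted by a scalar attention alpha, combined by f."""
+
+     def __init__(self, state_dim, hidden=32):
+         super().__init__()
+         self.g = nn.Sequential(nn.Linear(state_dim, hidden), nn.ReLU(),
+                                nn.Linear(hidden, hidden))
+         self.h = nn.Sequential(nn.Linear(2 * state_dim, hidden), nn.ReLU(),
+                                nn.Linear(hidden, hidden))
+         self.alpha = nn.Sequential(nn.Linear(2 * state_dim, hidden), nn.ReLU(),
+                                    nn.Linear(hidden, 1))
+         self.f = nn.Sequential(nn.Linear(hidden, hidden), nn.Tanh(),
+                                nn.Linear(hidden, state_dim))
+
+     def forward(self, z):  # z: (batch, O, state_dim)
+         B, O, D = z.shape
+         # All ordered pairs (z^o, z^o'), shape (B, O, O, 2D).
+         pairs = torch.cat([z.unsqueeze(2).expand(B, O, O, D),
+                            z.unsqueeze(1).expand(B, O, O, D)], dim=-1)
+         att = self.alpha(pairs).exp()  # scalar attention per pair
+         # Mask out self-pairs so the sum only runs over o' != o.
+         mask = 1.0 - torch.eye(O, device=z.device).view(1, O, O, 1)
+         rel = (mask * att * self.h(pairs)).sum(dim=2)
+         return self.f(self.g(z) + rel)  # mean of the next-state Gaussian
+ ```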
+
85
+ # 2.3 JOINT STATE-SPACE MODEL
86
+
87
+ Next, we assemble a state-space model from the two separate models for image modeling and physics prediction. The interface between the two components is the latent positions and velocities: the scene model infers them from images, and the physics model propagates them forward in time. Combining the two yields the state-space model $p(x,z) = p(z_0)p(x_0\mid z_0)\prod_tp(z_t\mid z_{t - 1})p(x_t\mid z_t)$ . To initialize the state, we model $p(z_0,z_1)$ using simple uniform and Gaussian distributions. Details are given in Appendix C.3.
88
+
89
+ Our model is trained on given video sequences $x$ by maximizing the evidence lower bound (ELBO) $\mathbb{E}_{q(z|x)}[\log p(x,z) - \log q(z\mid x)]$ . This requires formulating a variational distribution $q(z\mid x)$ to approximate the true posterior $p(z\mid x)$ . A natural approach is to factorize this distribution over time, i.e. $q(z\mid x) = q(z_0\mid x_0)\prod_tq(z_t\mid z_{t - 1},x_t)$ , resembling a Bayesian filter. The distribution $q(z_0\mid x_0)$ is then readily available using the inference network provided by SuPAIR.
90
+
91
+ The formulation of $q(z_{t} \mid z_{t-1}, x_{t})$ , however, is an important design decision. Previous works, including SQAIR and DDPAE, have chosen to unroll this distribution over objects, introducing a
92
+
93
+ ![](images/297e236bb2851bcc3381942f344740012fee6899588e72d38a97bd943b395cce.jpg)
94
+ real
95
+
96
+ ![](images/bdd0019f710514d92cab17b81f215b7a014deb7f1374e10b9a1973b413275c6f.jpg)
97
+ ours
98
+
99
+ ![](images/1c590213f9cb70ef3af5d17d6e6115217531f01df8b86ccf1e2d5e1562c134eb.jpg)
100
+ sqair
101
Figure 3: Visualisation of object positions from the real environment and predictions made by our model, SQAIR, and the supervised baseline, for the billiards and gravity environments, after the first 8 frames were given. Our model achieves realistic behaviour, outperforms the unsupervised baselines, and approaches the quality of the supervised baseline, despite being fully unsupervised. For full effect, the reader is encouraged to watch animated versions of the sequences in the repository github.com/jlko/STOVE. (Best viewed in color.)
102
+
103
+ ![](images/31d408b68aade7ee5f6a9fe150d7f81ded76bff8dc305a04c6037e420b657520.jpg)
104
+ supervised
105
+
106
+ ![](images/aed1d1738ce6d39f88fffed7c7bbdf9dbe469336a369a3ed212d3b45882799cc.jpg)
107
+ real
108
+
109
+ ![](images/ce098010c9a502f7ffb7f71f7355fb28d2fd2008229fcad329ea2381e04cf460.jpg)
110
+ ours
111
+
112
+ ![](images/abaf8652dc3abcbb44f778f7bd4176bac6fe51b8202ec7eb60c1730283cdaebf.jpg)
113
+ sqair
114
+
115
+ ![](images/2608b96a87f8b297e5cb7669a1866a1f64a9d102732a9796d6f0359368dec0b5.jpg)
116
+ supervised
117
+
118
+ costly double recurrence over time and objects, requiring $T \cdot O$ sequential recurrence steps in total. This increases the variance of the gradient estimate, slows down training, and hampers scalability. Inspired by Becker-Ehmck et al. (2019), we avoid this cost by reusing the dynamics model for the variational distribution. First, we construct the variational distribution $q(z_{t,\mathrm{pos}}^o \mid z_{t - 1})$ by slightly adjusting the dynamics prediction $p(z_{t,\mathrm{pos}}^o \mid z_{t - 1})$ , using the same mean values but separately predicted standard deviations. Together with an estimate for the same object from the object detection network $q(z_{t,\mathrm{pos}}^o \mid x_t)$ , we construct a joint estimate by multiplying the two Gaussians and renormalizing, yielding another Gaussian:
119
+
120
+ $$
121
+ q\left(z_{t,\text{pos}}^{o} \mid z_{t-1}, x_{t}\right) \propto q\left(z_{t,\text{pos}}^{o} \mid z_{t-1}\right) \cdot q\left(z_{t,\text{pos}}^{o} \mid x_{t}\right). \tag{2}
122
+ $$
123
+
124
+ Intuitively, this results in a distribution which reconciles the two proposals. A double recurrence is avoided since $q(z_{t} \mid x_{t})$ does not depend on previous timesteps and may thus be computed in parallel for all frames. Similarly, $q(z_{t} \mid z_{t-1})$ may be computed in parallel for all objects, leading to only $T + O$ sequential recurrence steps in total. An additional benefit of this approach is that the information learned by the dynamics network is reused for inference: if $q(z_{t} \mid x_{t}, z_{t-1})$ were just another neural network, it would have to essentially relearn the environment's dynamics from scratch, wasting parameters and training time. A further consequence is that the image likelihood $p(x_{t} \mid z_{t})$ is backpropagated through the dynamics model, which has been shown to be beneficial for efficient training (Karl et al., 2017; Becker-Ehmck et al., 2019). The same procedure is applied to reconcile velocity estimates from the two networks, where for the image model, velocities $z_{t,\mathrm{velo}}^{o}$ are estimated from position differences between two consecutive timesteps. The object sizes $z_{t,\mathrm{size}}^{o}$ are inferred solely from the image model. The latent states $z_{t,\mathrm{latent}}^{o}$ increase the modelling capacity of the dynamics network, are initialised to zero-mean Gaussians, and do not interact with the image model. This gives the inference procedure for the full latent state $z_{t}^{o} = (z_{t,\mathrm{pos}}^{o}, z_{t,\mathrm{size}}^{o}, z_{t,\mathrm{velo}}^{o}, z_{t,\mathrm{latent}}^{o})$ , as illustrated in Fig. 2 (right).
125
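+
+ The product of the two Gaussian proposals in Eq. 2 has a simple closed form (derived explicitly in Appendix C.1): precisions add, and the mean is the precision-weighted average. A minimal sketch, with the arithmetic broadcasting elementwise over objects and state dimensions:
+
+ ```python
+ def fuse_gaussians(mu_d, sigma_d, mu_i, sigma_i):
+     """Renormalized product of two Gaussians (Eq. 2).
+
+     mu_d, sigma_d: dynamics proposal q(z_t | z_{t-1});
+     mu_i, sigma_i: image proposal q(z_t | x_t).
+     Returns mean and std of the joint (again Gaussian) estimate.
+     """
+     var_d, var_i = sigma_d ** 2, sigma_i ** 2
+     var = 1.0 / (1.0 / var_d + 1.0 / var_i)   # precisions add
+     mu = var * (mu_d / var_d + mu_i / var_i)  # precision-weighted mean
+     return mu, var ** 0.5
+ ```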
+
126
+ Despite its benefits, this technique has thus far only been used in environments with a single object or with known state information. A challenge when applying it in a multi-object video setting is to match up the proposals of the two networks. Since the object detection RNN outputs proposals for object locations in an indeterminate order, it is not immediately clear how to find the corresponding proposals from the dynamics network. We have, however, found that a simple matching procedure results in good performance: For each $z_{t}$ , we assign the object order that results in the minimal difference of $||z_{t,\mathrm{pos}} - z_{t-1,\mathrm{pos}}||$ , where $||\cdot||$ is the Euclidean norm. The resulting Euclidean bipartite matching problem can be solved in cubic time using the classic Hungarian algorithm (Kuhn, 1955).
127
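+
+ A minimal sketch of this matching step, using SciPy's linear_sum_assignment solver for the bipartite matching (function and variable names are illustrative):
+
+ ```python
+ import numpy as np
+ from scipy.optimize import linear_sum_assignment
+
+ def match_objects(prev_pos, curr_pos):
+     """Align detections at time t with objects at time t-1 by minimizing
+     the summed Euclidean distances (bipartite matching).
+
+     prev_pos, curr_pos: arrays of shape (O, 2).
+     Returns curr_pos reordered to match prev_pos.
+     """
+     cost = np.linalg.norm(prev_pos[:, None, :] - curr_pos[None, :, :], axis=-1)
+     rows, cols = linear_sum_assignment(cost)  # Hungarian-style assignment
+     return curr_pos[cols]
+ ```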
+
128
+ # 2.4 CONDITIONING ON ACTIONS
129
+
130
+ In reinforcement learning, an agent interacts with the environment sequentially through actions $a_{t}$ to optimize a cumulative reward $r$ . To extend STOVE to operate in this setting, we make two changes, yielding a distribution $p(z_{t},r_{t}\mid z_{t - 1},a_{t - 1})$ .
131
+
132
+ First, we condition the dynamics model on actions $a_{t}$ , enabling a conditional prediction based on both state and action. To keep the model invariant to the order of the input objects, the action information is concatenated to each object state $z_{t - 1}^{o}$ before they are fed into the dynamics model. The model has to learn on its own which of the objects in the scene are influenced by the actions. To facilitate this, we have found it helpful to also concatenate appearance information from the
133
+
134
+ ![](images/531e3fbe27cf6cbc34e6312828218138abab2fe81a71aaf2ad0ebf9ebac07225.jpg)
135
+ Figure 4: Mean test set performance of our model compared to baselines. Our approach (STOVE) clearly outperforms all unsupervised baselines and is almost indistinguishable from the supervised dynamics model on the billiards task. (Top) Mean squared errors over all pixels in the video prediction setting (the lower, the better). (Bottom) Mean Euclidean distances between predicted and true positions (the lower, the better). All position and pixel values are in [0, 1]. In all experiments, the first eight frames are given, all remaining frames are then conditionally generated. The shading indicates the max and min values over multiple training runs with identical hyperparameters. (Best viewed in color.)
136
+
137
+ extracted object patches to the object state. While this patch-wise code could, in general, be obtained using some neural feature extractor, we achieved satisfactory performance by simply using the mean values per color channel when given colored input.
138
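+
+ A sketch of this conditioning step; the use of mean RGB values as the appearance code follows the description above, while the exact embedding of the action is given in Appendix C.2, and the function name is illustrative:
+
+ ```python
+ import torch
+
+ def condition_on_actions(z, action, appearance):
+     """Append action and appearance information to every object state,
+     preserving permutation invariance of the dynamics model.
+
+     z:          (batch, O, state_dim)  object states z_{t-1}
+     action:     (batch, n_actions)     one-hot encoded action a_{t-1}
+     appearance: (batch, O, 3)          mean RGB value per object patch
+     """
+     O = z.shape[1]
+     action = action.unsqueeze(1).expand(-1, O, -1)  # same action for all objects
+     return torch.cat([z, action, appearance], dim=-1)
+ ```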
+
139
+ The second change to the model is the addition of reward prediction. In many RL environments, rewards depend on the interactions between objects. Therefore, the dynamics prediction architecture, presented in Eq. 1, is well suited to also predict rewards. We choose to share the same encoding of object interactions between reward and dynamics prediction and simply apply two different output networks ( $f$ in Eq. 1) to obtain the dynamics and reward predictions. The total model is again optimized using the ELBO, this time including the reward likelihood $p(r_t \mid z_{t-1}, a_{t-1})$ .
140
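+
+ A minimal sketch of this weight sharing: the interaction encoding (the argument of $f$ in Eq. 1) is computed once, and two separate heads map it to the dynamics and reward predictions. The head sizes and the sum-pooling over objects are illustrative; the exact reward network is listed in Appendix C.2.
+
+ ```python
+ import torch.nn as nn
+
+ class DynamicsAndRewardHeads(nn.Module):
+     """Two output networks on top of a shared interaction encoding."""
+
+     def __init__(self, interactions, enc_dim, state_dim):
+         super().__init__()
+         self.interactions = interactions            # computes g(.) + sum alpha*h(.)
+         self.f_dyn = nn.Linear(enc_dim, state_dim)  # dynamics head
+         self.f_rew = nn.Sequential(                 # scalar reward head
+             nn.Linear(enc_dim, enc_dim), nn.ReLU(),
+             nn.Linear(enc_dim, 1), nn.Sigmoid())
+
+     def forward(self, z_and_action):
+         enc = self.interactions(z_and_action)       # (batch, O, enc_dim)
+         next_state = self.f_dyn(enc)
+         reward = self.f_rew(enc.sum(dim=1))         # pool over objects
+         return next_state, reward
+ ```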
+
141
+ # 3 EXPERIMENTAL EVIDENCE
142
+
143
+ In order to evaluate our model, we compare it to baselines in three different settings: First, pure video prediction, where the goal is to predict future frames of a video given previous ones. Second, the prediction of future object positions, which may be relevant for downstream tasks. Third, we extend one of the video datasets to a reinforcement learning task and investigate how our physics model may be utilized for sample-efficient, model-based reinforcement learning. With this paper, we also release a PyTorch implementation of STOVE.<sup>1</sup>
144
+
145
+ # 3.1 VIDEO AND STATE MODELING
146
+
147
+ Inspired by Watters et al. (2017), we consider grayscale videos of objects moving according to physical laws. In particular, we opt for the commonly used bouncing billiards balls dataset, as well as a dataset of gravitationally interacting balls. For further details on the datasets, see Appendix D. When trained using a single GTX 1080 Ti, STOVE converges after about 20 hours. As baselines, we compare to VRNNs (Chung et al., 2015), SQAIR (Kosiorek et al., 2018), and DDPAE (Hsieh et al., 2018). To allow for a fair comparison, we fix the number of objects predicted by SQAIR and DDPAE
148
+
149
+ ![](images/b0ee65bd0096d394ccfd928c499ba1c6418e920e319c3677b7012f9403696dff.jpg)
150
+ Figure 5: Comparison of the kinetic energies of the rollouts predicted by the models, computed based on position differences between successive states. Only STOVE's predictions reflect the conservation of total kinetic energy in the billiards data set. This is a quantitative measure of the convincing physical behavior in the rollout videos. (Left, center) Averages are over 300 trajectories from the test set. Shaded regions indicate one standard deviation. STOVE correctly predicts trajectories with constant energy, whereas SQAIR and DDPAE quickly diverge. (Right) Rolling average over a single, extremely long-term run. We conjecture that STOVE predicts physical behavior indefinitely. (Best viewed in color.)
151
+
152
+ ![](images/12feb9c91b3daca59ad6fe8e16a1ccf9c4f04ecf4e4220c85b868ea4307c5319.jpg)
153
+
154
+ ![](images/8e27e4a57d8da89426ef2e40c66820fe1522b2ab09432d9cb40a6500661abc2f.jpg)
155
+
156
+ to the correct number. Furthermore, we compare to a supervised baseline: here, we consider the ground truth positions and velocities to be fully observed and train our dynamics model on them, resembling the setting of Battaglia et al. (2016). Since our model needs to infer object states from pixels, this baseline provides an upper bound on the predictive performance we can hope to achieve with our model. In turn, the size of the performance gap between the two is a good indicator of the quality of our state-space model. We also report the results obtained by combining our image model with a simple linear physics model, which linearly extrapolates the objects' trajectories. Since VRNN does not reason about object positions, we only evaluate it on the video prediction task. Similarly, the supervised baseline does not reason about images and is considered for the position prediction task only. For more information on the baselines, see Appendix E.
157
+
158
+ Fig. 4 depicts the reconstruction and prediction errors of the various models: Each model is given eight frames of video from the test set as input, which it then reconstructs. Conditioned on this input, the models predict the object positions or resulting video frames for the following 92 timesteps. The predictions are evaluated on ground truth data by computing the mean squared error between pixels and the Euclidean distance between positions based on the best available object matching. We outperform all baselines on both the state and the image prediction task by a large margin. Additionally, we perform strikingly close to the supervised model.
159
+
160
+ For the gravitational data, the prediction task appears easier, as all models achieve lower errors than on the billiards task. However, in this regime of easy prediction, precise access to the object states becomes more important, which is likely the reason why the gap between our approach and the supervised baseline is slightly more pronounced. Despite this, STOVE produces high-quality rollouts and outperforms the unsupervised baselines.
161
+
162
+ Table 1 underlines these results with concrete numbers. We also report results for three ablations of STOVE, which are obtained by (a) training a separate dynamics network for inference with the same graph neural network architecture, instead of sharing weights with the generative model as argued for in Sec. 2.3, (b) no longer explicitly modelling velocities $z_{\mathrm{velo}}$ in the state, and (c) removing the latent state variables $z_{\mathrm{latent}}$ . The ablation study shows that each of these components contributes positively to the performance of STOVE. See Appendix F for a comparison of training curves for the ablations.
163
+
164
+ Fig. 3 illustrates predictions on future object positions made by the models, after each of them was given eight consecutive frames from the datasets. Visually, we find that STOVE predicts physically plausible sequences over long timeframes. This desirable property is not captured by the rollout error: Due to the chaotic nature of our environments, infinitesimally close initial states diverge quickly and a model which perfectly follows the ground truth states cannot exist. After this divergence has
165
+
166
+ Table 1: Predictive performance of our approach, the baselines, and ablations (lower is better, best unsupervised values are bold). STOVE outperforms all unsupervised baselines and is almost indistinguishable from the supervised model on the billiards task. The values are computed by summing the prediction errors presented in Fig. 4 in the time interval $t \in [9,18]$ , i.e., the first ten predicted timesteps. In parentheses, standard deviations across multiple training runs are given.
167
+
168
+ | | Billiards (pixels) | Billiards (positions) | Gravity (pixels) | Gravity (positions) |
+ | --- | --- | --- | --- | --- |
+ | STOVE (ours) | 0.240(14) | 0.418(20) | 0.040(3) | 0.142(7) |
+ | VRNN | 0.526(14) | - | 0.055(12) | - |
+ | SQAIR | 0.591 | 0.804 | 0.070 | 0.194 |
+ | DDPAE | 0.405 | 0.482 | 0.120 | 0.298 |
+ | Linear | 0.844(5) | 1.348(15) | 0.196(2) | 0.493(4) |
+ | Supervised | - | 0.232(37) | - | 0.013(2) |
+ | Abl: Double Dynamics | 0.262 | 0.458 | 0.042 | 0.154 |
+ | Abl: No Velocity | 0.272 | 0.460 | 0.053 | 0.174 |
+ | Abl: No Latent | 0.338 | 0.050 | 0.089 | 0.235 |
169
+
170
+ occurred, the rollout error no longer provides any information on the quality of the learned physical behavior. We therefore turn to investigating the total kinetic energy of the predicted billiards trajectories. Since the collisions in the training set are fully elastic and frictional forces are not present, the initial energy should be conserved. Fig. 5 shows the kinetic energies of trajectories predicted by STOVE and the baselines, computed based on the position differences between consecutive timesteps. While the energies of SQAIR and DDPAE diverge quickly in less than 100 frames, the mean energies of STOVE's rollouts stay constant and are good estimates of the true energy. We have confirmed that STOVE predicts constant energies, and therefore displays realistic-looking behavior, for at least 100000 steps. This is in stark contrast to the baselines, which predict teleporting, stopping, or overlapping objects after less than 100 frames. In the billiards dataset used by us and the literature, the total energy is the same for all sequences in the training set. See Appendix B for a discussion of how STOVE handles diverse energies.
171
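+
+ The energy metric itself is straightforward to reproduce; a minimal sketch, assuming unit masses and positions of shape (T, O, 2):
+
+ ```python
+ import numpy as np
+
+ def kinetic_energy(positions):
+     """Total kinetic energy per frame, with velocities taken as
+     finite differences of consecutive positions (unit masses)."""
+     velocities = positions[1:] - positions[:-1]   # (T-1, O, 2)
+     return 0.5 * (velocities ** 2).sum(axis=(1, 2))
+ ```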
+
172
+ # 3.2 MODEL-BASED CONTROL
173
+
174
+ To explore the usefulness of STOVE for reinforcement learning, we extend the billiards dataset into a reinforcement learning task. Now, the agent controls one of the balls using nine actions, which correspond to moving in one of the eight (inter)cardinal directions and staying at rest. The goal is to avoid collisions with the other balls, which elastically bounce off of each other, the walls, and the controlled ball. A negative reward of $-1$ is given whenever the controlled ball collides with one of the others. To allow the models to recognize the object controlled by the agent, we now provide them with RGB input in which the balls are colored differently. Starting with a random policy, we iteratively gather observations from the environment, i.e., sequences of images, actions, and rewards. Using these, we train our model as described in Sec. 2.4. To obtain a policy based on our world model, we use Monte-Carlo tree search (MCTS), leveraging our model as a simulator for planning. Using this policy, we gather more observations and apply them to refine the world model. As an upper bound on the performance achievable in this manner, we report the results obtained by MCTS when the real environment is used for planning. As a model-free baseline, we consider PPO (Schulman et al., 2017), which is a state-of-the-art algorithm on comparable domains such as Atari games. To explore the effect of the availability of state information, we also run PPO on a version of the environment in which, instead of images, the ground-truth object positions and velocities are observed directly.
175
+
176
+ Learning curves for each of the agents are given in Fig. 6 (left), reported at intervals of 10000 samples taken from the environment, up to a total of 130000. For our model, we collect the first 50000 samples using a random policy to provide an initial training set. After that, the described training loop is used, iterating between collecting 10000 observations using an MCTS-based policy and refining the model using examples sampled from the pool of previously seen observations. After 130000 samples, PPO has not yet seen enough samples to converge, whereas our model quickly learns to meaningfully model the environment and thus produces a better policy at this stage. Even when PPO is trained on ground truth states, MCTS based on STOVE remains comparable.
177
+
178
+ ![](images/50979965a6b873545311b4eab8704e86eafeade5ca2a7d79b0d52053cc0f508b.jpg)
179
+ Figure 6: Comparison of all models on sample efficiency and final performance. (Left) Mean cumulative reward over 100 steps on the environment, averaged over 100 environments, using the specified policy. The shaded regions correspond to one-tenth of a standard deviation. In addition to the training curves, two constant baselines are shown, one representing a random policy and one corresponding to the MCTS based policy when using the real environment as a simulator. (Right) Final performance of all approaches, after training each model to convergence. The shaded region corresponds to one standard deviation. (Best viewed in color.)
180
+
181
+ ![](images/fee451fafecc979ef7651cce89615acbe7cc17bae0295ecd2c95471537832376.jpg)
182
+
183
+ After training each model to convergence, the final performance of all approaches is reported in Fig. 6 (right). In this case, PPO achieves slightly better results; however, it only converges after training for approximately 4000000 steps, while our approach uses only 130000 samples. After around 1500000 steps, PPO does eventually surpass the performance of STOVE-based MCTS. Additionally, we find that MCTS on STOVE yields almost the same performance as on the real environment, indicating that it can be used to anticipate and avoid collisions accurately.
184
+
185
+ # 4 RELATED WORK
186
+
187
+ Multiple lines of work with the goal of video modeling or prediction have emerged recently. Prominently, the supervised modeling of physical interactions from videos has been investigated by Fragkiadaki et al. (2015), who train a model to play billiards with a single ball. Similarly, graph neural networks have been trained in a supervised fashion to predict the dynamics of objects from images (Watters et al., 2017; Sanchez-Gonzalez et al., 2018; Sun et al., 2018; 2019) or ground truth states (Kipf et al., 2018; Wang et al., 2018; Chang et al., 2017). A number of works learn object interactions in games in terms of rules instead of continuous dynamics (Guzdial et al., 2017; Ersen & Sariel, 2014). Janner et al. (2019) show successful planning based on learned interactions, but assume access to image segmentations. Several unsupervised approaches address the problem by fitting the parameters of a physics engine to data (Jaques et al., 2019; Wu et al., 2016; 2015). This necessitates specifying in advance which physical laws govern the observed interactions. In the fully unsupervised setting, mainly unstructured variational approaches have been explored (Babaeizadeh et al., 2017; Chung et al., 2015; Krishnan et al., 2015). However, without an explicit notion of objects, their performance in scenarios with interacting objects remains limited. Nevertheless, unstructured video models have recently been applied to model-based RL and have been shown to improve sample efficiency when used as a simulator for the real environment (Oh et al., 2015; Kaiser et al., 2020).
188
+
189
+ Only a small number of works incorporate objects into unsupervised video models. Xu et al. (2019) and Ehrhardt et al. (2018) take non-probabilistic autoencoding approaches to discovering objects in real-world videos. COBRA (Watters et al., 2019) represents a model-based RL approach based on MONet, but is restricted to environments with non-interacting objects and only uses one-step search to build its policy. Closest to STOVE are a small number of probabilistic models, namely SQAIR (Kosiorek et al., 2018), R-NEM (Van Steenkiste et al., 2018; Greff et al., 2017), and DDPAE (Hsieh et al., 2018). R-NEM learns a mixture model via expectation-maximization unrolled through time and handles interactions between objects in a factorized fashion. However, it lacks an explicitly
190
+
191
+ structured latent space, and requires noise in the input data to avoid local minima. Both DDPAE and SQAIR extend the AIR approach to work on videos using standard recurrent architectures. As discussed, this introduces a double recurrence over objects and time, which is detrimental to performance. However, SQAIR is capable of handling a varying number of objects, which is not something we consider in this paper.
192
+
193
+ # 5 CONCLUSION
194
+
195
+ We introduced STOVE, a structured, object-aware model for unsupervised video modeling and planning. It combines recent advances in unsupervised image modeling and physics prediction into a single compositional state-space model. The resulting joint model explicitly reasons about object positions and velocities, and is capable of generating highly accurate video predictions in domains featuring complicated non-linear interactions between objects. As our experimental evaluation shows, it outperforms previous unsupervised approaches and even approaches the performance and visual quality of a supervised model.
196
+
197
+ Additionally, we presented an extension of the video learning framework to the RL setting. Our experiments demonstrate that our model may be utilized for sample-efficient model-based control in a visual domain, making headway towards a long-standing goal of the model-based RL community. In particular, STOVE yields good performance with more than one order of magnitude fewer samples than the model-free baseline, even when paired with a relatively simple planning algorithm like MCTS.
198
+
199
+ At the same time, STOVE also makes several assumptions for the sake of simplicity. Relaxing them provides interesting avenues for future research. First, we assume a fixed number of objects, which may be avoided by performing dynamic object propagation and discovery as in SQAIR. Second, we have inherited the assumption of rectangular object masks from AIR. Applying a more flexible model such as MONet (Burgess et al., 2019) or GENESIS (Engelcke et al., 2020) may alleviate this, but also poses additional challenges, especially regarding the explicit modeling of movement. Finally, the availability of high-quality learned state-space models enables the use of more sophisticated planning algorithms in visual domains (Chua et al., 2018). In particular, by combining planning with policy and value networks, model-free and model-based RL may be integrated into a comprehensive system (Buckman et al., 2018).
200
+
201
+ Acknowledgments. The authors thank Adam Kosiorek for his assistance with the SQAIR experiments and Emilien Dupont for helpful discussions about conservation laws in dynamics models. KK acknowledges the support of the Rhine-Main universities' network for "Deep Continuous-Discrete Machine Learning" (DeCoDeML).
202
+
203
+ # REFERENCES
204
+
205
+ Mohammad Babaeizadeh, Chelsea Finn, Dumitru Erhan, Roy H Campbell, and Sergey Levine. Stochastic variational video prediction. In Proceedings of ICLR, 2017.
206
+ Peter Battaglia, Razvan Pascanu, Matthew Lai, Danilo Jimenez Rezende, et al. Interaction networks for learning about objects, relations and physics. In Proceedings of NeurIPS, pp. 4502-4510, 2016.
207
+ Peter W Battaglia, Jessica B Hamrick, Victor Bapst, Alvaro Sanchez-Gonzalez, Vinicius Zambaldi, Mateusz Malinowski, Andrea Tacchetti, David Raposo, Adam Santoro, Ryan Faulkner, et al. Relational inductive biases, deep learning, and graph networks. arXiv preprint arXiv:1806.01261, 2018.
208
+ Philip Becker-Ehmck, Jan Peters, and Patrick van der Smagt. Switching linear dynamics for variational bayes filtering. In Proceedings of ICML, 2019.
209
+ Jacob Buckman, Danijar Hafner, George Tucker, Eugene Brevdo, and Honglak Lee. Sample-efficient reinforcement learning with stochastic ensemble value expansion. In Proceedings of NeurIPS, pp. 8224-8234, 2018.
210
+
211
+ Christopher P Burgess, Loic Matthey, Nicholas Watters, Rishabh Kabra, Irina Higgins, Matt Botvinick, and Alexander Lerchner. Monet: Unsupervised scene decomposition and representation. arXiv preprint arXiv:1901.11390, 2019.
212
+ Michael Chang, Tomer Ullman, Antonio Torralba, and Joshua B Tenenbaum. A compositional object-based approach to learning physical dynamics. In Proceedings of ICLR, 2017.
213
+ Kurtland Chua, Roberto Calandra, Rowan McAllister, and Sergey Levine. Deep reinforcement learning in a handful of trials using probabilistic dynamics models. In Proceedings of NeurIPS, pp. 4754-4765, 2018.
214
+ Junyoung Chung, Kyle Kastner, Laurent Dinh, Kratarth Goel, Aaron C Courville, and Yoshua Bengio. A recurrent latent variable model for sequential data. In Proceedings of NeurIPS, pp. 2980-2988, 2015.
215
+ Rémi Coulom. Efficient selectivity and backup operators in monte-carlo tree search. In Computers and Games, pp. 72-83. Springer Berlin Heidelberg, 2007.
216
+ Sebastien Ehrhardt, Aron Monszpart, Niloy Mitra, and Andrea Vedaldi. Unsupervised intuitive physics from visual observations. In Proceedings of ACCV, pp. 700-716, 2018.
217
+ Martin Engelcke, Adam R. Kosiorek, Oiwi Parker Jones, and Ingmar Posner. Genesis: Generative scene inference and sampling with object-centric latent representations. In Proceedings of ICLR, 2020.
218
+ Mustafa Ersen and Sanem Sariel. Learning behaviors of and interactions among objects through spatio-temporal reasoning. IEEE TCIAIG, 7(1):75-87, 2014.
219
+ SM Ali Eslami, Nicolas Heess, Theophane Weber, Yuval Tassa, David Szepesvari, Geoffrey E Hinton, et al. Attend, infer, repeat: Fast scene understanding with generative models. In Proceedings of NeurIPS, pp. 3225-3233, 2016.
220
+ Katerina Fragkiadaki, Pulkit Agrawal, Sergey Levine, and Jitendra Malik. Learning visual predictive models of physics for playing billiards. arXiv preprint arXiv:1511.07404, 2015.
221
+ Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Proceedings of NeurIPS, pp. 2672-2680, 2014.
222
+ Klaus Greff, Sjoerd van Steenkiste, and Jurgen Schmidhuber. Neural expectation maximization. In Advances in Neural Information Processing Systems, pp. 6691-6701, 2017.
223
+ Klaus Greff, Raphaël Lopez Kaufmann, Rishabh Kabra, Nick Watters, Chris Burgess, Daniel Zoran, Loic Matthey, Matthew Botvinick, and Alexander Lerchner. Multi-object representation learning with iterative variational inference. In Proceedings of ICML, 2019.
224
+ U. Grenander. Lectures in Pattern Theory: Vol. 2 Pattern Analysis. Springer-Verlag, 1976.
225
+ Matthew Guzdial, Boyang Li, and Mark O Riedl. Game engine learning from video. In Proceedings of IJCAI, 2017.
226
+ Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8): 1735-1780, 1997.
227
+ Jun-Ting Hsieh, Bingbin Liu, De-An Huang, Li F Fei-Fei, and Juan Carlos Niebles. Learning to decompose and disentangle representations for video prediction. In Proceedings of NeurIPS, pp. 517-526, 2018.
228
+ Michael Janner, Sergey Levine, William T. Freeman, Joshua B. Tenenbaum, Chelsea Finn, and Jiajun Wu. Reasoning about physical interactions with object-centric models. In Proceedings of ICLR, 2019.
229
+ Miguel Jaques, Michael Burke, and Timothy Hospedales. Physics-as-inverse-graphics: Joint unsupervised learning of objects and physics from video. arXiv preprint arXiv:1905.11169, 2019.
230
+
231
+ Łukasz Kaiser, Mohammad Babaeizadeh, Piotr Miłoś, Błażej Osiński, Roy H Campbell, Konrad Czechowski, Dumitru Erhan, Chelsea Finn, Piotr Kozakowski, Sergey Levine, Afroz Mohiuddin, Ryan Sepassi, George Tucker, and Henryk Michalewski. Model based reinforcement learning for atari. In Proceedings of ICLR, 2020.
232
+ Maximilian Karl, Maximilian Soelch, Justin Bayer, and Patrick van der Smagt. Deep Variational Bayes Filters: Unsupervised Learning of State Space Models from Raw Data. In Proceedings of ICLR, 2017.
233
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In Proceedings of ICLR, 2015.
234
+ Diederik P Kingma and Max Welling. Auto-encoding variational bayes. In Proceedings of ICLR, 2014.
235
+ Thomas Kipf, Ethan Fetaya, Kuan-Chieh Wang, Max Welling, and Richard Zemel. Neural relational inference for interacting systems. In Proceedings of ICML, 2018.
236
+ Adam Kosiorek, Hyunjik Kim, Yee Whye Teh, and Ingmar Posner. Sequential attend, infer, repeat: Generative modelling of moving objects. In Proceedings of NeurIPS, pp. 8606-8616, 2018.
237
+ Rahul G. Krishnan, Uri Shalit, and David Sontag. Deep kalman filters. arXiv preprint arXiv:1511.05121, 2015.
238
+ Harold W Kuhn. The hungarian method for the assignment problem. Naval research logistics quarterly, 2(1-2):83-97, 1955.
239
+ Junhyuk Oh, Xiaoxiao Guo, Honglak Lee, Richard L Lewis, and Satinder Singh. Action-conditional video prediction using deep networks in atari games. In Proceedings of NeurIPS, pp. 2845-2853, 2015.
240
+ Alvaro Sanchez-Gonzalez, Nicolas Heess, Jost Tobias Springenberg, Josh Merel, Martin Riedmiller, Raia Hadsell, and Peter Battaglia. Graph networks as learnable physics engines for inference and control. In Proceedings of ICML, 2018.
241
+ Adam Santoro, David Raposo, David G Barrett, Mateusz Malinowski, Razvan Pascanu, Peter Battaglia, and Timothy Lillicrap. A simple neural network module for relational reasoning. In Proceedings of NeurIPS, pp. 4967-4976, 2017.
242
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
243
+ Karl Stelzner, Robert Peharz, and Kristian Kersting. Faster attend-infer-repeat with tractable probabilistic models. In Proceedings of ICML, pp. 5966-5975, 2019.
244
+ Chen Sun, Abhinav Shrivastava, Carl Vondrick, Kevin Murphy, Rahul Sukthankar, and Cordelia Schmid. Actor-centric relation network. In Proceedings of ECCV, 2018.
245
+ Chen Sun, Abhinav Shrivastava, Carl Vondrick, Rahul Sukthankar, Kevin Murphy, and Cordelia Schmid. Relational action forecasting. In Proceedings of CVPR, 2019.
246
+ Richard S Sutton and Andrew G Barto. Reinforcement learning: An introduction. MIT Press, Cambridge, 2011.
247
+ Sjoerd Van Steenkiste, Michael Chang, Klaus Greff, and Jürgen Schmidhuber. Relational neural expectation maximization: Unsupervised discovery of objects and their interactions. In Proceedings of ICLR, 2018.
248
+ Antonio Vergari, Robert Peharz, Nicola Di Mauro, Alejandro Molina, Kristian Kersting, and Floriana Esposito. Sum-product autoencoding: Encoding and decoding representations using sum-product networks. In Proceedings of AAAI, 2018.
249
+ Tingwu Wang, Renjie Liao, Jimmy Ba, and Sanja Fidler. Nervenet: Learning structured policy with graph neural networks. In Proceedings of ICLR, 2018.
250
+
251
+ Nicholas Watters, Daniel Zoran, Theophane Weber, Peter Battaglia, Razvan Pascanu, and Andrea Tacchetti. Visual interaction networks: Learning a physics simulator from video. In Proceedings of NeurIPS, pp. 4539-4547, 2017.
252
+ Nicholas Watters, Loic Matthey, Matko Bosnjak, Christopher P Burgess, and Alexander Lerchner. Cobra: Data-efficient model-based rl through unsupervised object discovery and curiosity-driven exploration. arXiv preprint arXiv:1905.09275, 2019.
253
+ Jiajun Wu, Ilker Yildirim, Joseph J Lim, Bill Freeman, and Josh Tenenbaum. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. In Proceedings of NeurIPS, pp. 127-135, 2015.
254
+ Jiajun Wu, Joseph J Lim, Hongyi Zhang, Joshua B Tenenbaum, and William T Freeman. Physics 101: Learning physical object properties from unlabeled videos. In Proceedings of BMVC, 2016.
255
+ Zhenjia Xu, Zhijian Liu, Chen Sun, Kevin Murphy, William T. Freeman, Joshua B. Tenenbaum, and Jiajun Wu. Modeling parts, structure, and system dynamics via predictive learning. In Proceedings of ICLR, 2019.
256
+ Cheng Zhang, Judith Butepage, Hedvig Kjellström, and Stephan Mandt. Advances in variational inference. IEEE TPAMI, 2019.
257
+ Jie Zhou, Ganqu Cui, Zhengyan Zhang, Cheng Yang, Zhiyuan Liu, and Maosong Sun. Graph neural networks: A review of methods and applications. arXiv preprint arXiv:1812.08434, 2018.
258
+
259
+ # A RECONSTRUCTIONS: SPRITES DATA
260
+
261
+ SuPAIR does not need a latent description of the objects' appearances. Nevertheless, object reconstructions can be obtained by using a variant of approximate MPE (most probable explanation) in the sum-product networks as proposed by Vergari et al. (2018). We follow the AIR approach and reconstruct each object separately and paste it into the canvas using spatial transformers. Unlike AIR, SuPAIR explicitly models the background using a separate background SPN. A reconstruction of the background is also obtained using MPE.
262
+
263
+ To demonstrate the capabilities of our image model, we also trained our model on a variant of the gravity data in which the round balls were replaced by a random selection of four different sprites of the same size. Fig. 7 shows the reconstructions obtained from SuPAIR when trained on these more complex object shapes.
264
+
265
+ # B STUDY OF ENERGIES
266
+
267
+ As discussed in Sec. 3.1, the energies of the ground truth data were constant for all sequences during the training of STOVE. However, initial velocities are drawn from a normal distribution. This is the standard procedure for generating the bouncing balls dataset as used by previous publications. Under these circumstances, STOVE does indeed learn to discover and replicate the total energies of the system, while SQAIR and DDPAE do not. Even if trained on constant-energy data, STOVE generalises to unseen energies to some extent. Observed velocities, and therefore total energies, are highly correlated with the true total kinetic energies of the sequences. However, as prediction starts, STOVE quickly regresses to the energy of the training set, see Fig. 8 (left). If trained on a dataset of diverse total energies, its ability to model sequences of different energies improves, see Fig. 8 (right). Rollouts now initially represent the true energy of the observed sequence, although this estimate diverges over a time span of around 500 frames to a constant but wrong energy value. This is an improvement over the model trained on constant-energy data, where the regression to the training data energy happens much more quickly, within around 10 frames. Note that this does not drastically decrease the visual quality of the rollouts, as the change of total energy over 500 frames is gradual enough. We leave the reliable prediction of rollouts with physically valid constant energy for sequences of varying energies to future work.
268
+
269
+ # C MODEL DETAILS
270
+
271
+ Here, we present additional details on the architecture and hyperparameters of STOVE.
272
+
273
+ # C.1 INFERENCE ARCHITECTURE
274
+
275
+ The object detection network for $q(z_{t,\text{where}} \mid x_t)$ is realised by an LSTM (Hochreiter & Schmidhuber, 1997) with 256 hidden units, which outputs the mean and standard deviation of the objects' two-dimensional position and size distributions, i.e. $q(z_{t,\text{pos},\text{size}}^o \mid x_t)$ with $2 \cdot 2 \cdot 2 = 8$ parameters per object. Given such position distributions for two consecutive timesteps $q(z_{t-1,\text{pos}} \mid x_{t-1})$ , $q(z_{t,\text{pos}} \mid x_t)$ , with parameters $\mu_{z_{t-1,\text{pos}}^o}$ , $\sigma_{z_{t-1,\text{pos}}^o}$ , $\mu_{z_{t,\text{pos}}^o}$ , $\sigma_{z_{t,\text{pos}}^o}$ , the following velocity estimate based on the difference in position is constructed:
276
+
277
+ $$
278
+ q(z_{t,\mathrm{velo}}^{o} \mid x_{t}, x_{t-1}) = \mathcal{N}\left(\mu_{z_{t,\mathrm{pos}}^{o}} - \mu_{z_{t-1,\mathrm{pos}}^{o}},\; \sigma_{z_{t,\mathrm{pos}}^{o}}^{2} + \sigma_{z_{t-1,\mathrm{pos}}^{o}}^{2}\right).
279
+ $$
280
+
281
+ As described in Sec. 2.3, positions and velocities are also inferred from the dynamics model as $q(z_{t,\mathrm{pos}}^o \mid z_{t - 1})$ and $q(z_{t,\mathrm{velo}}^o \mid z_{t - 1})$ . A joint estimate, including information from both image model and dynamics prediction, is obtained by multiplying the respective distributions and renormalizing. Since both $q$ -distributions are Gaussian, the normalized product is again Gaussian, with mean and
282
+
283
+ ![](images/250d42d0bf58bbf0abe9cdf56e8e0a79bd2957ccd91ebeb2abf1af06c577d4d4.jpg)
284
+ Figure 7: Reconstructions obtained from our image model when using more varied shapes.
285
+
286
+ standard deviation given by
287
+
288
+ $$
289
+ \begin{aligned} q\left(z_{t} \mid x_{t}, z_{t-1}\right) &\propto q\left(z_{t} \mid x_{t}\right) \cdot q\left(z_{t} \mid z_{t-1}\right) \\ &= \mathcal{N}\left(z_{t}; \mu_{t,i}, \sigma_{t,i}^{2}\right) \cdot \mathcal{N}\left(z_{t}; \mu_{t,d}, \sigma_{t,d}^{2}\right) \\ &= \mathcal{N}\left(z_{t}; \mu_{t}, \sigma_{t}^{2}\right), \\ \mu_{t} &= \frac{\sigma_{t,d}^{2}\,\mu_{t,i} + \sigma_{t,i}^{2}\,\mu_{t,d}}{\sigma_{t,d}^{2} + \sigma_{t,i}^{2}}, \qquad \frac{1}{\sigma_{t}^{2}} = \frac{1}{\sigma_{t,d}^{2}} + \frac{1}{\sigma_{t,i}^{2}}, \end{aligned}
290
+ $$
291
+
292
+ where we relax our notation for readability, writing $z_{t}$ for either $z_{t,\mathrm{pos}}^{o}$ or $z_{t,\mathrm{velo}}^{o}$ ; the indices $i$ and $d$ refer to the parameters obtained from the image and dynamics models. This procedure is applied independently for the positions and velocities of each object.
293
+
294
+ For $z_{t,\text{latent}}^o$ , we choose dimension 12, such that a full state $z_t^o = (z_{t,\text{pos}}^o, z_{t,\text{size}}^o, z_{t,\text{velo}}^o, z_{t,\text{latent}}^o)$ is 18-dimensional.
295
+
296
+ # C.2 GRAPH NEURAL NETWORK
297
+
298
+ The dynamics prediction is given by the following series of transformations applied to each input state of shape (batch size, number of objects, $l$ ), where $l = 16$ , since size information is currently not propagated through the dynamics prediction.
299
+
300
+ - $S_{1}$ : Encode input state with linear layer $[l, 2l]$ .
301
+ - $S_{2}$ : Apply linear layer $[2l, 2l]$ to $S_{1}$ followed by ReLU non-linearity.
302
+ - $S_{3}$ : Apply linear layer $[2l, 2l]$ to $S_{2}$ and add result to $S_{2}$ . This gives the dynamics prediction without relational effects, corresponding to $g(z_{t}^{o})$ in Eq. 1.
303
+ - $C_1$ : The following steps obtain the relational aspects of the dynamics prediction, corresponding to $h(z_t^o,z_t^{o'})$ in Eq. 1. Concatenate the encoded state $S_1^o$ pairwise with all state encodings, yielding a tensor of shape (batch size, number of objects, number of objects, 4l).
304
+ - $C_2$ : Apply linear layer $[4l, 4l]$ to $C_1$ followed by ReLU.
305
+ - $C_3$ : Apply linear layer $[4l, 2l]$ to $C_2$ followed by ReLU.
306
+ - $C_4$ : Apply linear layer $[2l, 2l]$ to $C_3$ and add to $C_3$ .
307
+ - $A_{1}$ : To obtain attention coefficients $\alpha (z_t^o,z_t^{o'})$ , apply linear layer [4l, 4l] to $C_1$ followed by ReLU.
308
+ - $A_{2}$ : Apply linear layer $[4l, 2l]$ to $A_{1}$ followed by ReLU.
309
+ - $A_{3}$ : Apply linear layer $[2l, 1]$ to $A_{2}$ and apply exponential function.
310
+ - $R_{1}$ : Multiply $C_{4}$ with $A_{3}$ , where diagonal elements of $A_{3}$ are masked out to ensure that $R_{1}$ only covers cases where $o \neq o'$ .
311
+ - $R_{2}$ : Sum over $R_{1}$ for all $o'$ , to obtain tensor of shape (batch size, number of objects, $2l$ ). This is the relational dynamics prediction.
312
+
313
+ ![](images/46bb85517c1ae2e02d91c69e07fe014bcd6ca40a11d38653b3fa5eb6ee318844.jpg)
314
+ Figure 8: Mean kinetic energy observed/predicted by STOVE over the true energy of the sequences. (left) STOVE is trained on sequences of constant kinetic energy. As can be seen from the blue scatter points, STOVE manages to predict sequences of arbitrary lengths which, on average, preserve the constant energy of the test set. When STOVE is applied to sequences of different energies, it manages to infer these energies from observed frames fairly well, with inaccuracies compounding at larger energies (red). In the following prediction, however, the mean predicted energies diverge quickly to the energy value of the training set (orange and green). (right) STOVE is now trained on sequences of varying energies. Compared to the constant-energy training, the energy estimates from both observed and predicted frames improve drastically. The predictions no longer immediately regress towards a specific value (orange). However, after 100 frames, the predicted energies still regress to a wrong value (green). (all) The observed values refer to energies obtained as the mean energy over the six initially observed frames. The short (long) time frame refers to an energy obtained as the mean energy over the first 10 (100) frames of prediction. (Best viewed in color.)
315
+
316
+ ![](images/7f6a747a747307c9c4cdde9f2ea57baacca92dc9cb55f51259f5b15e97e71318.jpg)
317
+
318
+ - $D_{1}$ : Sum relational dynamics $R_{2}$ and self-dynamics $S_{3}$ , obtaining the input to $f$ in Eq. 1.
319
+ - $D_{2}$ : Apply linear layer $[2l, 2l]$ to $D_{1}$ followed by tanh non-linearity.
320
+ - $D_{3}$ : Apply linear layer $[2l, 2l]$ to $D_{2}$ followed by tanh non-linearity and add result to $D_{2}$ .
321
+ - $D_4$ : Concatenate $D_3$ and $S_1$ , and apply linear layer $[4l, 2l]$ followed by tanh.
322
+ - $D_{5}$ : Apply linear layer $[2l, 2l]$ to $D_{4}$ and add result to $D_{4}$ to obtain final dynamics prediction.
323
+
324
+ The output $D_{5}$ has shape (batch size, number of objects, $2l$ ), comprising the means and standard deviations of the next predicted state.
325
+
326
+ For the model-based control scenario, the one-hot encoded actions (batch size, action space) are transformed with a linear layer [action space, number of objects · encoding size] and reshaped to (batch size, number of objects, encoding size). The action embedding and the object appearances (batch size, number of objects, 3) are then concatenated to the input state. The rest of the dynamics prediction proceeds as above. The reward prediction consists of the following steps:
327
+
328
+ - $H_{1}$ : Apply linear layer $[2l, 2l]$ to $D_{1}$ followed by ReLU.
329
+ - $H_{2}$ : Apply linear layer $[2l, 2l]$ to $H_{1}$ .
330
+ - $H_{3}$ : Sum over object dimension to obtain tensor of shape (batch size, l).
331
+ - $H_4$ : Apply linear layer $[l, l/2]$ to $H_3$ followed by ReLU.
332
+ - $H_{5}$ : Apply linear layer $[l/2, l/4]$ to $H_{4}$ followed by ReLU.
333
+ - $H_{6}$ : Apply linear layer $[l / 4, 1]$ to $H_{5}$ followed by a sigmoid non-linearity.
334
+
335
+ $H_{6}$ then gives the final reward prediction.
336
+
337
+ # C.3 STATE INITIALIZATION
338
+
339
+ In the first two timesteps, we cannot yet apply STOVE's main inference step $q(z_{t} \mid z_{t-1}, x_{t}, x_{t-1})$ as described above. In order to initialize the latent state over the first two frames, we apply a simplified architecture and only use a partial state at $t = 0$ .
340
+
341
+ At $t = 0$ , $z_0 \sim q(z_{0,(\mathrm{pos},\mathrm{size})} \mid x_0)$ is given purely by the object detection network, since no previous states, which could be propagated, exist. $z_0$ is incomplete insofar as it does not contain velocity information or latents. At $t = 1$ , $q(z_{1,\mathrm{pos},\mathrm{size}} \mid x_1,x_0)$ is still given purely based on the object detection network. Note that for a dynamics prediction of $z_1$ , velocity information at $t = 0$ would need to be available. However, at $t = 1$ , velocities can be constructed based on the differences between the previously inferred object positions. We sample $z_{1,\mathrm{latent}}$ from the prior Gaussian distribution to assemble the first full initial state $z_1$ . At $t \geq 2$ , the full inference network can be run: States are inferred both from the object detection network $q(z_t \mid x_t,x_{t - 1})$ as well as propagated using the dynamics model $q(z_{t} \mid z_{t - 1})$ .
342
+
343
+ In the generative model, similar adjustments are made: $p(z_{0,\mathrm{pos,size}})$ is given by a uniform prior; velocities and latents are omitted. At $t = 1$ , velocities are sampled from a uniform distribution in polar coordinates $p(z_{1,\mathrm{velo}})$ and positions are given by a simple linear dynamics model $p(z_{1,\mathrm{pos}}|z_{0,\mathrm{pos}},z_{1,\mathrm{velo}}) = \mathcal{N}(z_{0,\mathrm{pos}} + z_{1,\mathrm{velo}},\sigma)$ . Latents $z_{1,\mathrm{latent}}$ are sampled from a Gaussian prior. Starting at $t = 2$ , the full dynamics model is used.
344
+
345
+ # C.4 TRAINING PROCEDURE
346
+
347
+ Our model was trained using the Adam optimizer (Kingma & Ba, 2015), with a learning rate of $2 \times 10^{-3} \exp(-40 \times 10^{-3} \cdot \text{step})$ for a total of 83000 steps with a batch size of 256.
348
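+
+ One way to express this exponential schedule in PyTorch is a LambdaLR wrapper around Adam; the model below is a placeholder, and the decay factor implements the formula as stated (the original training code may differ):
+
+ ```python
+ import math
+ import torch
+
+ model = torch.nn.Linear(18, 18)  # placeholder for STOVE's parameters
+ optimizer = torch.optim.Adam(model.parameters(), lr=2e-3)
+ # LambdaLR multiplies the base learning rate by the returned factor.
+ scheduler = torch.optim.lr_scheduler.LambdaLR(
+     optimizer, lr_lambda=lambda step: math.exp(-40e-3 * step))
+ ```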
+
349
+ # D DATA DETAILS
350
+
351
+ For the billiards and gravitational data, 1000 sequences of length 100 were generated for training. From these, subsequences of length 8 were sampled and used to optimize the ELBO. A test dataset of 300 sequences of length 100 was also generated and used for all evaluations. The pixel resolution was $32 \times 32$ for the billiards data and $50 \times 50$ for the gravity data. All models for video prediction were learned on grayscale data, with objects of identical appearance. The $O = 3$ balls were initialised with uniformly random positions and velocities, rejecting configurations with overlap. They are rendered using anti-aliasing. The billiards data models the balls as circular objects, which undergo elastic collisions with each other or the walls of the environment. For the gravity data, the balls are modeled as point masses, where, following Watters et al. (2017), we clip the gravitational force to avoid slingshot effects. Also, we add an additional basin of attraction towards the center of the canvas and model the balls in their center-of-mass system to avoid drift. Velocities here are initialised orthogonal to the direction towards the center of the canvas for a stabilising effect. For full details, we refer to the file envs.py in the provided code.
352
+
353
+ # E BASELINES FOR VIDEO MODELING
354
+
355
+ Following Kosiorek et al. (2018), we experimented with different hyperparameter configurations for VRNNs. We varied the sizes of the hidden and latent states $[h, z]$ , experimenting with the values [256, 16], [512, 32], [1024, 64], and [2048, 32]. We found that increasing the model capacity beyond [512, 32] did not yield large increases in performance, which is why we chose the configuration [512, 32] for our experiments. Our VRNN implementation is written in PyTorch and based on https://github.com/emited/VariationalRecurrentNeuralNetwork.
356
+
357
+ SQAIR can handle a variable number of objects in each sequence. However, to allow for a fairer comparison to STOVE, we fixed the number of objects to the correct number. This means that in the first timestep, exactly three objects are discovered, which are then propagated in all following timesteps, without further discoveries. Our implementation is based on the original implementation provided by the authors at https://github.com/akosiorek/sqair.
358
+
359
+ ![](images/26dab4052c7800d9d1428586e8a2699c0d6b3b662d0541fe4893f9a4d9bd6ae4.jpg)
360
+ Figure 9: Mean predicted position error over a rollout length of 8 frames as training progresses, for the billiards (left) and gravity (right) scenarios, for STOVE and its ablations. (Best viewed in color.)
361
+
362
+ ![](images/53ab8addbb565b5af2f7fa93b0a132ad1a8e8b2abc4c4c5de27a91a3ae92a5d8.jpg)
363
+
364
+ The DDPAE experiments were performed using the implementation available at https://github.com/jthsieh/DDPAE-video-prediction. Default parameters for training DDPAE on billiards datasets are provided with the code. However, the resolution of our billiards (32 pixels) and gravity (64 pixels) datasets differs from the resolution DDPAE expects (64 pixels). While we experimented with adjusting DDPAE parameters such as the latent space dimension to fit our different resolution, the best results were obtained when bilinearly scaling our data to the resolution DDPAE expects. DDPAE was trained for 400000 steps, which sufficed for convergence of the models' test set error.
365
+
366
+ The linear baseline was obtained as follows: For the first 8 frames, we infer the full model state using STOVE. We then take the last inferred positions and velocities of each object and predict future positions by assuming constant, uniform motions for each object. We do not allow objects to leave the frame, i.e. when objects reach the canvas boundary after some timesteps, they stick to it.
367
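+
+ A minimal sketch of this baseline, with positions and velocities normalized to [0, 1] as in Fig. 4 (function name illustrative):
+
+ ```python
+ import numpy as np
+
+ def linear_rollout(pos, velo, steps):
+     """Constant-velocity extrapolation; objects stick to the boundary.
+
+     pos, velo: (O, 2) last inferred positions and velocities.
+     Returns predicted positions of shape (steps, O, 2).
+     """
+     preds = []
+     for _ in range(steps):
+         pos = np.clip(pos + velo, 0.0, 1.0)
+         preds.append(pos.copy())
+     return np.stack(preds)
+ ```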
+
368
+ Since our dynamics model requires only object positions and velocities as input, it is trivial to construct a supervised baseline for our physics prediction by replacing the SuPAIR-inferred states with real, ground-truth states. On these, the model can then be trained in supervised fashion.
369
+
370
+ # F TRAINING CURVES OF ABLATIONS
371
+
372
+ In Fig. 9, we display learning curves for STOVE and the presented ablations. As mentioned in the main text, the ablations demonstrate the value of reusing the dynamics model, of explicitly including a velocity value, and of the unstructured latent space in the dynamics model.
373
+
374
+ # G DETAILS ON THE REINFORCEMENT LEARNING MODELS
375
+
376
+ Our MCTS implementation uses the standard UCT formulation to trade off exploration and exploitation. The $c$ parameter is set to 1 in all our experiments. Since the environment does not provide a natural endpoint, we cut off all rollouts at a depth of 20 timesteps. We found this to be a good trade-off between runtime and accuracy.
377
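+
+ For reference, a sketch of the standard UCT selection rule with $c = 1$; the node attributes (children, visits, value) are hypothetical:
+
+ ```python
+ import math
+
+ def uct_select(node, c=1.0):
+     """Pick the child maximizing value estimate + exploration bonus."""
+     log_n = math.log(max(node.visits, 1))
+     return max(
+         node.children,
+         key=lambda ch: ch.value / max(ch.visits, 1)
+         + c * math.sqrt(log_n / max(ch.visits, 1)))
+ ```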
+
378
+ When expanding a node on the true environment, we compute the result of the most promising action, and then start a rollout using a random policy from the resulting state. For the final evaluation, a total of 200 nodes are expanded. To better utilize the GPU, a slightly different approach is used for STOVE: when we expand a node in this setting, we predict the results of all actions simultaneously and compute a rollout from each resulting position. In turn, only 50 nodes are expanded. To estimate the node value function, the average reward over all rollouts is propagated back to the root, and each node's visit counter is increased by 1. Furthermore, we discount the reward predicted by STOVE with a factor of 0.95 per timestep to account for the higher uncertainty of longer rollouts. This is not done in the baseline running on the real environment, since it behaves deterministically.
379
+
380
+ For PPO, we employ a standard convolutional neural network as an actor-critic for the evaluation on images and an MLP for the evaluation on states. The image network consists of two convolutional layers, each using 32 output filters, with kernel sizes of 4 and 3, respectively, and a stride of 2. The MLP consists of two fully connected layers with 128 and 64 hidden units. In both cases, an additional fully connected layer links the outputs of the respective base to an actor and a critic head. For the convolutional base, this linking layer employs 512 hidden units; for the MLP, 64. All previously mentioned layers use rectified linear activations. The actor head predicts a probability distribution over next actions using a softmax activation function, while the critic head outputs a value estimate for the current state using a linear prediction. We tested several hyperparameter configurations and found the following to be the most efficient. To update the actor-critic architecture, we sample 32 trajectories of length 16 from separate environments in every batch. The training uses an Adam optimizer with a learning rate of $2 \times 10^{-4}$ and an $\epsilon$ value of $1 \times 10^{-5}$ . The clipping parameter of PPO is set to $1 \times 10^{-1}$ . We update the network for 4 epochs in each batch using 32 mini-batches of the sampled data. The value loss is weighted at $5 \times 10^{-1}$ and the entropy coefficient is set to $1 \times 10^{-2}$ .
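+
+ For convenience, the stated PPO hyperparameters can be collected in a config sketch (the dictionary and its keys are illustrative, not part of a released API):
+
+ ```python
+ ppo_config = {
+     "num_envs": 32,          # parallel environments sampled per batch
+     "rollout_length": 16,    # trajectory length per environment
+     "lr": 2e-4,              # Adam learning rate
+     "adam_eps": 1e-5,
+     "clip_param": 1e-1,      # PPO clipping parameter
+     "epochs_per_batch": 4,
+     "num_minibatches": 32,
+     "value_loss_coef": 5e-1,
+     "entropy_coef": 1e-2,
+ }
+ ```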
structuredobjectawarephysicspredictionforvideomodelingandplanning/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e580c7b07180aa5ece761b29aa1181769ad44f256256aea6aef2566583f4d0e3
3
+ size 502795
structuredobjectawarephysicspredictionforvideomodelingandplanning/layout.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dcbdda36473a22c8d52ca42d22af4f8d3c30d87ba23bb1258d353d0f45b4f78b
3
+ size 626334
subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d1088d1af7955e0ebcec9a64dfff873873b3af2572da373f9d7850e6b6320e5
3
+ size 94766
subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d41faccc0daff0db9f3314c5802f1fee0685a4f867c1c28663b8db565fca99db
3
+ size 114682
subpolicyadaptationforhierarchicalreinforcementlearning/b11c88d9-c48e-4534-80da-23c1884da58e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:362d0e22c38cb4fa2efb1c92d7ba0f98c785109819c60cfe049beef37d8a3bfd
3
+ size 4061045
subpolicyadaptationforhierarchicalreinforcementlearning/full.md ADDED
@@ -0,0 +1,403 @@
 
 
 
 
1
+ # SUB-POLICY ADAPTATION FOR HIERARCHICAL REINFORCEMENT LEARNING
2
+
3
+ Alexander C. Li*, Carlos Florensa*, Ignasi Clavera, Pieter Abbeel
4
+
5
+ University of California, Berkeley
6
+
7
+ {alexli1, florensa, iclavera, pabbeel}@berkeley.edu
8
+
9
+ # ABSTRACT
10
+
11
+ Hierarchical reinforcement learning is a promising approach to tackle long-horizon decision-making problems with sparse rewards. Unfortunately, most methods still decouple the lower-level skill acquisition process and the training of a higher level that controls the skills in a new task. Leaving the skills fixed can lead to significant sub-optimality in the transfer setting. In this work, we propose a novel algorithm to discover a set of skills and continuously adapt them along with the higher level even when training on a new task. Our main contributions are two-fold. First, we derive a new hierarchical policy gradient with an unbiased latent-dependent baseline, and we introduce Hierarchical Proximal Policy Optimization (HiPPO), an on-policy method to efficiently train all levels of the hierarchy jointly. Second, we propose a method of training time-abstractions that improves the robustness of the obtained skills to environment changes. Code and videos are available$^{1}$.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ Reinforcement learning (RL) has made great progress in a variety of domains, from playing games such as Pong and Go (Mnih et al., 2015; Silver et al., 2017) to automating robotic locomotion (Schulman et al., 2015; Heess et al., 2017), dexterous manipulation (Florensa et al., 2017b; OpenAI et al., 2018), and perception (Nair et al., 2018; Florensa et al., 2018). Yet, most work in RL is still learning from scratch when faced with a new problem. This is particularly inefficient when tackling multiple related tasks that are hard to solve due to sparse rewards or long horizons.
16
+
17
+ A promising technique to overcome this limitation is hierarchical reinforcement learning (HRL) (Sutton et al., 1999). In this paradigm, policies have several modules of abstraction, allowing subsets of the modules to be reused. The most common case consists of temporal hierarchies (Precup, 2000; Dayan & Hinton, 1993), where a higher-level policy (manager) takes actions at a lower frequency, and its actions condition the behavior of some lower level skills or sub-policies. When transferring knowledge to a new task, most prior works fix the skills and train a new manager on top. Despite having a clear benefit in kick-starting the learning in the new task, having fixed skills can considerably cap the final performance on the new task (Florensa et al., 2017a). Little work has been done on adapting pre-trained sub-policies to be optimal for a new task.
18
+
19
+ In this paper, we develop a new framework for simultaneously adapting all levels of temporal hierarchies. First, we derive an efficient approximated hierarchical policy gradient. The key insight is that, despite the decisions of the manager being unobserved latent variables from the point of view of the Markovian environment, from the perspective of the sub-policies they can be considered as part of the observation. We show that this provides a decoupling of the manager and sub-policy gradients, which greatly simplifies the computation in a principled way. It also theoretically justifies a technique used in other prior works (Frans et al., 2018). Second, we introduce a sub-policy specific baseline for our hierarchical policy gradient. We prove that this baseline is unbiased, and our experiments reveal faster convergence, suggesting efficient gradient variance reduction. Then, we introduce a more stable way of using this gradient, Hierarchical Proximal Policy Optimization (HiPPO). This method helps us take more conservative steps in our policy space (Schulman et al., 2017), critical in hierarchies
20
+
21
+ because of the interdependence of each layer. Results show that HiPPO is highly efficient both when learning from scratch, i.e. adapting randomly initialized skills, and when adapting pretrained skills on a new task. Finally, we evaluate the benefit of randomizing the time-commitment of the sub-policies, and show it helps both in terms of final performance and zero-shot adaptation on similar tasks.
22
+
23
+ # 2 PRELIMINARIES
24
+
25
+ We define a discrete-time finite-horizon discounted Markov decision process (MDP) by a tuple $M = (\mathcal{S},\mathcal{A},\mathcal{P},r,\rho_0,\gamma ,H)$ , where $\mathcal{S}$ is a state set, $\mathcal{A}$ is an action set, $\mathcal{P}:\mathcal{S}\times \mathcal{A}\times \mathcal{S}\to \mathbb{R}_{+}$ is the transition probability distribution, $r:\mathcal{S}\times \mathcal{A}\to \mathbb{R}$ is the reward function, $\rho_0$ is the initial state distribution, $\gamma \in [0,1]$ is a discount factor, and $H$ the horizon. Our objective is to find a stochastic policy $\pi_{\theta}$ that maximizes the expected discounted return within the MDP, $\eta (\pi_{\theta}) = \mathbb{E}_{\tau}[\sum_{t = 0}^{H}\gamma^{t}r(s_{t},a_{t})]$ . We use $\tau = (s_0,a_0,\dots)$ to denote the entire state-action trajectory, where $s_0\sim \rho_0(s_0)$ , $a_{t}\sim \pi_{\theta}(a_{t}|s_{t})$ , and $s_{t + 1}\sim \mathcal{P}(s_{t + 1}|s_t,a_t)$ .
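+
+ As a worked example, the objective can be estimated from a batch of sampled trajectories (a minimal sketch; `batch`, an assumed list of per-trajectory reward sequences, is illustrative):
+
+ ```python
+ def discounted_return(rewards, gamma):
+     # sum_t gamma^t r(s_t, a_t) for a single trajectory
+     return sum(gamma ** t * r for t, r in enumerate(rewards))
+
+ def estimate_eta(batch, gamma=0.999):
+     # Monte Carlo estimate of eta(pi_theta) over sampled trajectories
+     return sum(discounted_return(rs, gamma) for rs in batch) / len(batch)
+ ```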
26
+
27
+ ![](images/efb8518ac264ad14b3deabec434a3da6f9c18efde852d5c81246e9466328788c.jpg)
28
+ Figure 1: Temporal hierarchy studied in this paper. A latent code $z_{kp}$ is sampled from the manager policy $\pi_{\theta_h}(z|s)$ every $p$ time-steps, using the current observation $s_{kp}$ . The actions $a_{t}$ are sampled from the sub-policy $\pi_{\theta_l}(a_t|s_t,z_{kp})$ , conditioned on the same latent code, from $t = kp$ to $t = (k + 1)p - 1$ .
29
+
30
+ In this work, we propose a method to learn a hierarchical policy and efficiently adapt all the levels in the hierarchy to perform a new task. We study hierarchical policies composed of a higher level, or manager $\pi_{\theta_h}(z_t|s_t)$ , and a lower level, or sub-policy $\pi_{\theta_l}(a_{t'}|z_t,s_{t'})$ . The higher level does not take actions in the environment directly, but rather outputs a command, or latent variable $z_{t}\in \mathcal{Z}$ , that conditions the behavior of the lower level. We focus on the common case where $\mathcal{Z} = \mathbb{Z}_n$ , making the manager choose among $n$ sub-policies, or skills, to execute. The manager typically operates at a lower frequency than the sub-policies, only observing the environment every $p$ time-steps. When the manager receives a new observation, it decides which low-level policy to commit to for $p$ environment steps by means of a latent code $z$ . Figure 1 depicts this framework, where the high-level frequency $p$ is a random variable, which is one of the contributions of this paper, as described in Section 4.4. Note that the class of hierarchical policies we work with is more restrictive than others like the options framework, where the time-commitment is also decided by the policy. Nevertheless, we show that this loss in policy expressivity acts as a regularizer and does not prevent our algorithm from surpassing other state-of-the-art methods.
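+
+ The control flow of Figure 1 can be summarized in a short sketch (here `manager` and `skill` are assumed to be callables that sample from $\pi_{\theta_h}$ and $\pi_{\theta_l}$ , and `env.step` is assumed to return a `(state, reward, done)` tuple; the random time-commitment of Section 4.4 is included):
+
+ ```python
+ import random
+
+ def hierarchical_rollout(env, manager, skill, horizon, p_min=5, p_max=15):
+     s, t, traj, done = env.reset(), 0, [], False
+     while t < horizon and not done:
+         p = random.randint(p_min, p_max)  # random time-commitment (Sec. 4.4)
+         z = manager(s)                    # latent skill choice z ~ pi_h(.|s)
+         for _ in range(p):                # commit to skill z for p steps
+             a = skill(s, z)               # low-level action a ~ pi_l(.|s, z)
+             s_next, r, done = env.step(a)
+             traj.append((s, z, a, r))
+             s, t = s_next, t + 1
+             if done or t >= horizon:
+                 break
+     return traj
+ ```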
31
+
32
+ # 3 RELATED WORK
33
+
34
+ There has been growing interest in HRL for the past few decades (Sutton et al., 1999; Precup, 2000), but only recently has it been applied to high-dimensional continuous domains as we do in this work (Kulkarni et al., 2016; Daniel et al., 2016). To obtain the lower level policies, or skills, most methods exploit some additional assumptions, like access to demonstrations (Le et al., 2018; Merel et al., 2019; Ranchod et al., 2015; Sharma et al., 2018), policy sketches (Andreas et al., 2017), or task decomposition into sub-tasks (Ghavamzadeh & Mahadevan, 2003; Sohn et al., 2018). Other methods use a different reward for the lower level, often constraining it to be a "goal reacher" policy, where the signal from the higher level is the goal to reach (Nachum et al., 2018; Levy et al., 2019; Vezhnevets et al., 2017). These methods are very promising for state-reaching tasks, but might require access to goal-reaching reward systems not defined in the original MDP, and are more limited when training on tasks beyond state-reaching. Our method does not require any additional supervision, and the obtained skills are not constrained to be goal-reaching.
35
+
36
+ When transferring skills to a new environment, most HRL methods keep them fixed and simply train a new higher-level on top (Hausman et al., 2018; Heess et al., 2016). Other work allows for building on previous skills by constantly supplementing the set of skills with new ones (Shu et al., 2018), but they require a hand-defined curriculum of tasks, and the previous skills are never fine-tuned.
37
+
38
+ Our algorithm allows for seamless adaptation of the skills, showing no trade-off between leveraging the power of the hierarchy and the final performance in a new task. Other methods use invertible functions as skills (Haarnoja et al., 2018), and therefore a fixed skill can be fully overwritten when a new layer of hierarchy is added on top. This kind of "fine-tuning" is promising, although similar to other works (Peng et al., 2019), they do not apply it to temporally extended skills as we do here.
39
+
40
+ One of the most general frameworks to define temporally extended hierarchies is the options framework (Sutton et al., 1999), and it has recently been applied to continuous state spaces (Bacon et al., 2017). One of the most delicate parts of this formulation is the termination policy, and it requires several regularizers to avoid skill collapse (Harb et al., 2017; Vezhnevets et al., 2016). This modification of the objective may be difficult to tune and affects the final performance. Instead of adding such penalties, we propose to have skills of a random length, not controlled by the agent during training of the skills. The benefit is two-fold: no termination policy to train, and more stable skills that transfer better. Furthermore, these works only used discrete action MDPs. We lift this assumption, and show good performance of our algorithm in complex locomotion tasks. There are other algorithms recently proposed that go in the same direction, but we found them more complex, less principled (their per-action marginalization cannot capture well the temporal correlation within each option), and without available code or evidence of outperforming non-hierarchical methods (Smith et al., 2018).
41
+
42
+ The closest work to ours in terms of final algorithm structure is the one proposed by Frans et al. (2018). Their method can be included in our framework, and hence benefits from our new theoretical insights. We introduce a modification that is shown to be highly beneficial: the random time-commitment mentioned above. With it, our method can learn in difficult environments without their complicated training scheme.
43
+
44
+ # 4 EFFICIENT HIERARCHICAL POLICY GRADIENTS
45
+
46
+ When using a hierarchical policy, the intermediate decision taken by the higher level is not directly applied in the environment. Therefore, technically it should not be incorporated into the trajectory description as an observed variable, like the actions. This makes the policy gradient considerably harder to compute. In this section we first prove that, under mild assumptions, the hierarchical policy gradient can be accurately approximated without needing to marginalize over this latent variable. Then, we derive an unbiased baseline for the policy gradient that can reduce the variance of its estimate. Finally, with these findings, we present our method, Hierarchical Proximal Policy Optimization (HiPPO), an on-policy algorithm for hierarchical policies, allowing learning at all levels of the policy jointly and preventing sub-policy collapse.
47
+
48
+ # 4.1 APPROXIMATE HIERARCHICAL POLICY GRADIENT
49
+
50
+ Policy gradient algorithms are based on the likelihood ratio trick (Williams, 1992) to estimate the gradient of returns with respect to the policy parameters as
51
+
52
+ $$
53
+ \begin{array}{ll} \nabla_{\theta} \eta(\pi_{\theta}) = \mathbb{E}_{\tau}\left[ \nabla_{\theta} \log P(\tau)\, R(\tau) \right] \approx \frac{1}{N} \sum_{i = 1}^{N} \nabla_{\theta} \log P(\tau_{i})\, R(\tau_{i}) & (1) \\ \quad = \frac{1}{N} \sum_{i = 1}^{N} \frac{1}{H} \sum_{t = 1}^{H} \nabla_{\theta} \log \pi_{\theta}(a_{t} \mid s_{t})\, R(\tau_{i}) & (2) \end{array}
54
+ $$
55
+
56
+ In a temporal hierarchy, a hierarchical policy with a manager $\pi_{\theta_h}(z_t|s_t)$ selects every $p$ time-steps one of $n$ sub-policies to execute. These sub-policies, indexed by $z \in \mathbb{Z}_n$ , can be represented as a single conditional probability distribution over actions $\pi_{\theta_l}(a_t|z_t,s_t)$ . This allows us to not only use a given set of sub-policies, but also leverage skills learned with Stochastic Neural Networks (SNNs) (Florensa et al., 2017a). Under this framework, the probability of a trajectory $\tau = (s_0,a_0,s_1,\dots ,s_H)$ can be written as
57
+
58
+ $$
59
+ P (\tau) = \left(\prod_ {k = 0} ^ {H / p} \left[ \sum_ {j = 1} ^ {n} \pi_ {\theta_ {h}} (z _ {j} | s _ {k p}) \prod_ {t = k p} ^ {(k + 1) p - 1} \pi_ {\theta_ {l}} (a _ {t} | s _ {t}, z _ {j}) \right]\right) \left[ P (s _ {0}) \prod_ {t = 1} ^ {H} P (s _ {t + 1} | s _ {t}, a _ {t}) \right]. \tag {3}
60
+ $$
61
+
62
+ The mixture action distribution, which presents itself as an additional summation over skills, prevents additive factorization when taking the logarithm, as in going from Eq. 1 to Eq. 2. This can yield numerical
63
+
64
+ instabilities due to the product of the $p$ sub-policy probabilities. For instance, when the skills are clearly distinguishable, all but one of the sub-policies' probabilities will be small, and their product becomes exponentially small. In the following Lemma, we derive an approximation of the policy gradient whose error tends to zero as the skills become more diverse, and draw insights on the interplay of the manager actions.
65
+
66
+ Lemma 1. If the skills are sufficiently differentiated, then the latent variable can be treated as part of the observation to compute the gradient of the trajectory probability. Let $\pi_{\theta_h}(z|s)$ and $\pi_{\theta_l}(a|s,z)$ be Lipschitz functions w.r.t. their parameters, and assume that $0 < \pi_{\theta_l}(a|s,z_j) < \epsilon$ for all $z_j \neq z_{kp}$ ; then
67
+
68
+ $$
69
+ \nabla_ {\theta} \log P (\tau) = \sum_ {k = 0} ^ {H / p} \nabla_ {\theta} \log \pi_ {\theta_ {h}} \left(z _ {k p} \mid s _ {k p}\right) + \sum_ {t = 0} ^ {H} \nabla_ {\theta} \log \pi_ {\theta_ {l}} \left(a _ {t} \mid s _ {t}, z _ {k p}\right) + \mathcal {O} (n H \epsilon^ {p - 1}) \tag {4}
70
+ $$
71
+
72
+ Proof. See Appendix.
73
+
74
+ ![](images/92b28654999b4d92ca4883c43d45cf6f637b417204c32865a01f8957807330ac.jpg)
75
+
76
+ Our assumption can be seen as having diverse skills. Namely, for each action there is just one sub-policy that gives it high probability. In this case, the latent variable can be treated as part of the observation to compute the gradient of the trajectory probability. Many algorithms to extract lower-level skills are based on promoting diversity among the skills (Florensa et al., 2017a; Eysenbach et al., 2019), therefore usually satisfying our assumption. We further analyze how well this assumption holds in our experiments section and Table 2.
77
+
78
+ # 4.2 UNBIASED SUB-POLICY BASELINE
79
+
80
+ The policy gradient estimate obtained when applying the log-likelihood ratio trick as derived above is known to have large variance. A very common approach to mitigate this issue without biasing the estimate is to subtract a baseline from the returns (Peters & Schaal, 2008). It is well known that such baselines can be made state-dependent without incurring any bias. However, it is still unclear how to formulate a baseline for all the levels in a hierarchical policy, since an action-dependent baseline does introduce bias in the gradient (Tucker et al., 2018). It has recently been proposed to use latent-conditioned baselines (Weber et al., 2019). Here we go further and prove that, under the assumptions of Lemma 1, we can formulate an unbiased latent-dependent baseline for the approximate gradient (Eq. 5).
81
+
82
+ Lemma 2. For any functions $b_h: \mathcal{S} \to \mathbb{R}$ and $b_l: \mathcal{S} \times \mathcal{Z} \to \mathbb{R}$ we have:
83
+
84
+ $$
85
+ \mathbb{E}_{\tau}\Big[ \sum_{k = 0}^{H / p} \nabla_{\theta} \log \pi_{\theta_{h}}(z_{kp} | s_{kp})\, b_{h}(s_{kp}) \Big] = 0 \quad \text{and} \quad \mathbb{E}_{\tau}\Big[ \sum_{t = 0}^{H} \nabla_{\theta} \log \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp})\, b_{l}(s_{t}, z_{kp}) \Big] = 0
86
+ $$
87
+
88
+ Proof. See Appendix.
89
+
90
+ ![](images/1117afa876e82925a232629c1ca56292e98d04aa510e20a8fadaa1f17c7c04c4.jpg)
91
+
92
+ Now we apply Lemma 1 and Lemma 2 to Eq. 1. By using the corresponding value functions as the function baseline, the return can be replaced by the Advantage function $A(s_{kp}, z_{kp})$ (see details in Schulman et al. (2016)), and we obtain the following approximate policy gradient expression:
93
+
94
+ $$
95
+ \hat{g} = \mathbb{E}_{\tau}\Big[ \Big( \sum_{k = 0}^{H / p} \nabla_{\theta} \log \pi_{\theta_{h}}(z_{kp} | s_{kp})\, A(s_{kp}, z_{kp}) \Big) + \Big( \sum_{t = 0}^{H} \nabla_{\theta} \log \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp})\, A(s_{t}, a_{t}, z_{kp}) \Big) \Big]
96
+ $$
97
+
98
+ This hierarchical policy gradient estimate can have lower variance than without baselines, but using it for policy optimization through stochastic gradient descent still yields an unstable algorithm. In the next section, we further improve the stability and sample efficiency of the policy optimization by incorporating techniques from Proximal Policy Optimization (Schulman et al., 2017).
99
+
100
+ # 4.3 HIERARCHICAL PROXIMAL POLICY OPTIMIZATION
101
+
102
+ Using an appropriate step size in policy space is critical for stable policy learning. Modifying the policy parameters in some directions may have a minimal impact on the distribution over actions, whereas small changes in other directions might change its behavior drastically and hurt training
103
+
104
+ Algorithm 1 HiPPO Rollout
105
+ 1: Input: skills $\pi_{\theta_l}(a|s,z)$ , manager $\pi_{\theta_h}(z|s)$ , time-commitment bounds $P_{\mathrm{min}}$ and $P_{\mathrm{max}}$ , horizon $H$
106
+ 2: Reset environment: $s_0 \sim \rho_0$ , $t = 0$ .
107
+ 3: while $t < H$ do
108
+ 4: Sample time-commitment $p \sim \mathrm{Cat}([P_{\mathrm{min}}, P_{\mathrm{max}}])$
109
+ 5: Sample skill $z_t \sim \pi_{\theta_h}(\cdot|s_t)$
110
+ 6: for $t' = t \ldots (t + p)$ do
111
+ 7: Sample action $a_{t'} \sim \pi_{\theta_l}(\cdot|s_{t'}, z_t)$
112
+ 8: Observe new state $s_{t' + 1}$ and reward $r_{t'}$
113
+ 9: end for
114
+ 10: $t \gets t + p$
115
+ 11: end while
116
+ 12: Output: $(s_0, z_0, a_0, s_1, a_1, \ldots, s_H, z_H, a_H, s_{H + 1})$
117
+
118
+ Algorithm 2 HiPPO
119
+ 1: Input: skills $\pi_{\theta_l}(a|s,z)$ , manager $\pi_{\theta_h}(z|s)$ , horizon $H$ , learning rate $\alpha$
120
+ 2: while not done do
121
+ 3: for actor = 1, 2, ..., N do
122
+ 4: Obtain trajectory with HiPPO Rollout
123
+ 5: Estimate advantages $\hat{A}(a_{t'}, s_{t'}, z_t)$ and $\hat{A}(z_t, s_t)$
124
+ 6: end for
125
+ 7: $\theta \gets \theta + \alpha \nabla_\theta L_{HiPPO}^{CLIP}(\theta)$
126
+ 8: end while
127
+
128
+ efficiency (Kakade, 2002). Trust region policy optimization (TRPO) uses a constraint on the KL-divergence between the old policy and the new policy to prevent this issue (Schulman et al., 2015). Unfortunately, hierarchical policies are generally represented by complex distributions without closed form expressions for the KL-divergence. Therefore, to improve the stability of our hierarchical policy gradient we turn towards Proximal Policy Optimization (PPO) (Schulman et al., 2017). PPO is a more flexible and compute-efficient algorithm. In a nutshell, it replaces the KL-divergence constraint with a cost function that achieves the same trust region benefits, but only requires the computation of the likelihood. Letting $w_{t}(\theta) = \frac{\pi_{\theta}(a_{t}|s_{t})}{\pi_{\theta_{old}}(a_{t}|s_{t})}$ , the PPO objective is:
129
+
130
+ $$
131
+ L ^ {C L I P} (\theta) = \mathbb {E} _ {t} \min \left\{w _ {t} (\theta) A _ {t}, \operatorname {c l i p} (w _ {t} (\theta), 1 - \epsilon , 1 + \epsilon) A _ {t} \right\}
132
+ $$
133
+
134
+ We can adapt our approximated hierarchical policy gradient with the same approach by letting $w_{h, kp}(\theta) = \frac{\pi_{\theta_h}(z_{kp}|s_{kp})}{\pi_{\theta_{h, old}}(z_{kp}|s_{kp})}$ and $w_{l,t}(\theta) = \frac{\pi_{\theta_l}(a_t|s_t,z_{kp})}{\pi_{\theta_{l, old}}(a_t|s_t,z_{kp})}$ , and using the super-index clip to denote the clipped objective version, we obtain the new surrogate objective:
135
+
136
+ $$
137
+ \begin{array}{l} L _ {H i P P O} ^ {C L I P} (\theta) = \mathbb {E} _ {\tau} \Big [ \sum_ {k = 0} ^ {H / p} \min \left\{w _ {h, k p} (\theta) A (s _ {k p}, z _ {k p}), w _ {h, k p} ^ {\mathrm {c l i p}} (\theta) A (s _ {k p}, z _ {k p}) \right\} \\ \left. + \sum_ {t = 0} ^ {H} \min \left\{w _ {l, t} (\theta) A \left(s _ {t}, a _ {t}, z _ {k p}\right), w _ {l, t} ^ {\text {c l i p}} (\theta) A \left(s _ {t}, a _ {t}, z _ {k p}\right) \right\} \right] \\ \end{array}
138
+ $$
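+
+ A tensor-level sketch of this surrogate (names are illustrative; the log-probabilities and advantages are assumed to be precomputed, one entry per skill choice for the manager terms and one per environment step for the sub-policy terms):
+
+ ```python
+ import torch
+
+ def clip_term(logp, logp_old, adv, eps=0.1):
+     # Clipped likelihood-ratio term, shared by manager and sub-policy.
+     w = torch.exp(logp - logp_old)
+     return torch.min(w * adv, torch.clamp(w, 1 - eps, 1 + eps) * adv).mean()
+
+ def hippo_clip_objective(mgr, sub, eps=0.1):
+     # mgr and sub each hold 'logp', 'logp_old', 'adv' tensors; the result
+     # is maximized by gradient ascent on theta = [theta_h, theta_l].
+     return (clip_term(mgr['logp'], mgr['logp_old'], mgr['adv'], eps)
+             + clip_term(sub['logp'], sub['logp_old'], sub['adv'], eps))
+ ```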
139
+
140
+ We call this algorithm Hierarchical Proximal Policy Optimization (HiPPO). Next, we introduce a critical addition: randomizing the time-commitment between skills.
141
+
142
+ # 4.4 VARYING TIME-COMMITMENT
143
+
144
+ Most hierarchical methods either consider a fixed time-commitment to the lower level skills (Florensa et al., 2017a; Frans et al., 2018), or implement the complex options framework (Precup, 2000; Bacon et al., 2017). In this work we propose an in-between, where the time-commitment to the skills is a random variable sampled from a fixed distribution Categorical $(T_{\mathrm{min}}, T_{\mathrm{max}})$ just before the manager takes a decision. This modification does not hinder final performance, and we show it improves zero-shot adaptation to a new task. This approach to sampling rollouts is detailed in Algorithm 1. The full algorithm is detailed in Algorithm 2.
145
+
146
+ # 5 EXPERIMENTS
147
+
148
+ We designed our experiments to answer the following questions: 1) How does HiPPO compare against a flat policy when learning from scratch? 2) Does it lead to policies more robust to environment changes? 3) How well does it adapt already learned skills? and 4) Does our skill diversity assumption hold in practice?
149
+
150
+ ![](images/236c8eac7e992e7607459e30caf68b20392779fe2c16500ff94a6f9b7a2cc322.jpg)
151
+ (a) Block Hopper
152
+
153
+ ![](images/ca5d992c2dd6dff2bb9deb892c3c5fa1b2ebc82764cb309ff6f5eaefabdca3cb.jpg)
154
+ (b) Block Half Cheetah
155
+
156
+ ![](images/9f2d56dbb1f46e0fbced2fa61dfae5aa4c12e11debdf55c195fd223217ccc6b3.jpg)
157
+ (c) Snake Gather
158
+
159
+ ![](images/9fc0a2be0677ee6e1a1ff225598555341009b3daa8126ee463b30f728090fb6b.jpg)
160
+ (d) Ant Gather
161
+
162
+ ![](images/b639b0e52093ea28031190daed02703ea217817cbb8006d45d73111c729afc08.jpg)
163
+ Figure 2: Environments used to evaluate the performance of our method. Every episode has a different configuration: wall heights for (a)-(b), ball positions for (c)-(d)
164
+ (a) Block Hopper
165
+ Figure 3: Analysis of different time-commitment strategies on learning from scratch.
166
+
167
+ ![](images/6c03fe8ce434f24b605c4270f888da33a5471300afbf36003a3aacdd15e57382.jpg)
168
+ (b) Block Half Cheetah
169
+
170
+ ![](images/47b1d25ac781ad216113e3a4c77608713958082e9f8f4d04e14d15b5f4b6bbfd.jpg)
171
+ (c) Snake Gather
172
+
173
+ ![](images/38a7333526461e6c2d51e248e0e2c414b5f9219d16ae2081d0411463199bfbf9.jpg)
174
+ (d) Ant Gather
175
+
176
+ # 5.1 TASKS
177
+
178
+ We evaluate our approach on a variety of robotic locomotion and navigation tasks. The Block environments, depicted in Fig. 2a-2b, have walls of random heights at regular intervals, and the objective is to learn a gait for the Hopper and Half-Cheetah robots to jump over them. The agents observe the height of the wall ahead and their proprioceptive information (joint positions and velocities), receiving a reward of $+1$ for each wall cleared. The Gather environments, described by Duan et al. (2016), require agents to collect apples (green balls, $+1$ reward) while avoiding bombs (red balls, -1 reward). The only available perception beyond proprioception is through a LIDAR-type sensor indicating the distance to objects in different directions, as well as their type, as depicted in the bottom left corner of Fig. 2c-2d. This is a challenging hierarchical task with sparse rewards that requires simultaneously learning perception, locomotion, and higher-level planning capabilities. We use the Snake and Ant robots in Gather. Details for all robotic agents are provided in Appendix B.
179
+
180
+ # 5.2 LEARNING FROM SCRATCH AND TIME-COMMITMENT
181
+
182
+ In this section, we study the benefit of using our HiPPO algorithm instead of standard PPO on a flat policy (Schulman et al., 2017). The results, reported in Figure 3, demonstrate that training from scratch with HiPPO leads to faster learning and better performance than flat PPO. Furthermore, we show that the benefit of HiPPO does not just come from having temporally correlated exploration: PPO with action repeat converges at a lower performance than our method. HiPPO leverages the time-commitment more efficiently, as suggested by the poor performance of the ablation with $p = 1$ , in which the manager takes an action at every environment step. Finally, Figure 4 shows the effectiveness of using the presented skill-dependent baseline.
183
+
184
+ # 5.3 COMPARISON TO OTHER METHODS
185
+
186
+ We compare HiPPO to current state-of-the-art hierarchical methods. First, we evaluate HIRO (Nachum et al., 2018), an off-policy RL method based on training a goal-reaching lower level policy. Fig. 5 shows that HIRO achieves poor performance on our tasks. As further detailed in Appendix D, this algorithm is sensitive to access to ground-truth information, like the exact $(x,y)$ position of the robot in Gather. In contrast, our method is able to perform well directly from the raw sensory inputs described in Section 5.1. We evaluate Option-Critic (Bacon et al., 2017), a variant of the options framework (Sutton et al., 1999) that can be used for continuous action-spaces. It fails to learn, and we hypothesize that their algorithm provides less time-correlated exploration and learns
187
+
188
+ ![](images/68e670cc6d17765c3bb907e97793225f259de1f57497addc3d2c4dc864ff6f0f.jpg)
189
+ (a) Block Hopper
190
+
191
+ ![](images/e8dd521171ae3c468f3e7028b25efdf83fc4d0dc1548099929b4126222c46a5d.jpg)
192
+ (b) Block Half Cheetah
193
+
194
+ ![](images/59ca0191a3ac3c1b177dc41c5f29871516818d20d7fba87d99d784192bccf0d0.jpg)
195
+ (c) Snake Gather
196
+
197
+ ![](images/ad90ee52db5a5827279ddc60f12a30549fea82a5d1b673d190878fca28a2ac2f.jpg)
198
+ (d) Ant Gather
199
+
200
+ ![](images/51b39247785f2d9d2165cf7cf4087ea106477ad5ab5fef88cf129129cf926a73.jpg)
201
+ Figure 4: Using a skill-conditioned baseline, as defined in Section 4.2, generally improves performance of HiPPO when learning from scratch.
202
+ (a) Block Hopper
203
+ Figure 5: Comparison of HiPPO and HierVPG to prior hierarchical methods on learning from scratch.
204
+
205
+ ![](images/bc94d9f83d079ff44f772c987a4df30b37b43250f5e42717a1c010a96279a293.jpg)
206
+ (b) Block Half Cheetah
207
+
208
+ ![](images/98275faf912ce2603e16ff22f738e1ccf9d76e1a201678017c958f6d96d8f2ad.jpg)
209
+ (c) Snake Gather
210
+
211
+ ![](images/5d27007cf6687ba72bdb7474f68c0ddbc520d2161c7d8216aeb1fad5d80509f7.jpg)
212
+ (d) Ant Gather
213
+
214
+ less diverse skills. We also compare against MLSH (Frans et al., 2018), which repeatedly samples new environment configurations to learn primitive skills. We take these hyperparameters from their Ant Twowalk experiment: resetting the environment configuration every 60 iterations, a warmup period of 20 during which only the manager is trained, and a joint training period of 40 during which both manager and skills are trained. Our results show that such a training scheme does not provide any benefits. Finally, we provide a comparison to a direct application of our Hierarchical Vanilla Policy Gradient (HierVPG) algorithm, and we see that the algorithm is unstable without PPO's trust-region-like technique.
215
+
216
+ # 5.4 ROBUSTNESS TO DYNAMICS PERTURBATIONS
217
+
218
+ We investigate the robustness of HiPPO to changes in the dynamics of the environment. We perform several modifications to the base Snake Gather and Ant Gather environments. One at a time, we change the body mass, dampening of the joints, body inertia, and friction characteristics of both robots. The results, presented in Table 1, show that HiPPO with randomized period Categorical $\left([T_{\min}, T_{\max}]\right)$ is able to better handle these dynamics changes. In terms of the drop in policy performance between the training environment and test environment, it outperforms HiPPO with fixed period on 6 out of 8 related tasks. These results suggest that the randomized period exposes the policy to a wide range of scenarios, which makes it easier to adapt when the environment changes.
219
+
220
+ <table><tr><td>Gather</td><td>Algorithm</td><td>Initial</td><td>Mass</td><td>Dampening</td><td>Inertia</td><td>Friction</td></tr><tr><td rowspan="3">Snake</td><td>Flat PPO</td><td>2.72</td><td>3.16 (+16%)</td><td>2.75 (+1%)</td><td>2.11 (-22%)</td><td>2.75 (+1%)</td></tr><tr><td>HiPPO, p = 10</td><td>4.38</td><td>3.28 (-25%)</td><td>3.27 (-25%)</td><td>3.03 (-31%)</td><td>3.27 (-25%)</td></tr><tr><td>HiPPO random p</td><td>5.11</td><td>4.09 (-20%)</td><td>4.03 (-21%)</td><td>3.21 (-37%)</td><td>4.03 (-21%)</td></tr><tr><td rowspan="3">Ant</td><td>Flat PPO</td><td>2.25</td><td>2.53 (+12%)</td><td>2.13 (-5%)</td><td>2.36 (+5%)</td><td>1.96 (-13%)</td></tr><tr><td>HiPPO, p = 10</td><td>3.84</td><td>3.31 (-14%)</td><td>3.37 (-12%)</td><td>2.88 (-25%)</td><td>3.07 (-20%)</td></tr><tr><td>HiPPO random p</td><td>3.22</td><td>3.37 (+5%)</td><td>2.57 (-20%)</td><td>3.36 (+4%)</td><td>2.84 (-12%)</td></tr></table>
221
+
222
+ Table 1: Zero-shot transfer performance. The final return in the initial environment is shown, as well as the average return over 25 rollouts in each new modified environment.
223
+
224
+ # 5.5 ADAPTATION OF PRE-TRAINED SKILLS
225
+
226
+ For the Block task, we use DIAYN (Eysenbach et al., 2019) to train 6 differentiated subpolicies in an environment without any walls. Here, we see if these diverse skills can improve performance on a downstream task that is outside the training distribution. For Gather, we take 6 pretrained
227
+
228
+ ![](images/8aadbafa438b1970698ef6544f75f68cc64bc50cf7b13bf9e26648440dd430c1.jpg)
229
+ (a) Block Hopper
230
+
231
+ ![](images/ed6142d34df525fd93c260a3a2de8d7976e7b81f2561af53a9c82c3215b92d14.jpg)
232
+ Figure 6: Benefit of adapting some given skills when the preferences of the environment are different from those of the environment where the skills were originally trained. Adapting skills with HiPPO has better learning performance than leaving the skills fixed or learning from scratch.
233
+
234
+ ![](images/91f8800c3409af79bda465ad73d949dedbc4719f9d741e0e8a430eebb2d52a59.jpg)
235
+ (b) Block Half Cheetah
236
+ (c) Snake Gather
237
+
238
+ ![](images/5f5872f2f2a1e4ce25bf6f1fe10115da19c373231d67f2d5202ec9ad5588febf.jpg)
239
+ (d) Ant Gather
240
+
241
+ subpolicies encoded by a Stochastic Neural Network (Tang & Salakhutdinov, 2013) that was trained in a diversity-promoting environment (Florensa et al., 2017a). We fine-tune them with HiPPO on the Gather environment, but with an extra penalty on the velocity of the Center of Mass. This can be understood as a preference for cautious behavior. This requires adjustment of the sub-policies, which were trained with a proxy reward encouraging them to move as far as possible (and hence quickly). Fig. 6 shows that using HiPPO to simultaneously train a manager and fine-tune the skills achieves higher final performance than fixing the sub-policies and only training a manager with PPO. The two initially learn at the same rate, but HiPPO's ability to adjust to the new dynamics allows it to reach a higher final performance. Fig. 6 also shows that HiPPO can fine-tune the same given skills better than Option-Critic (Bacon et al., 2017), MLSH (Frans et al., 2018), and HIRO (Nachum et al., 2018).
242
+
243
+ # 5.6 SKILL DIVERSITY ASSUMPTION
244
+
245
+ In Lemma 1, we derived a more efficient and numerically stable gradient by assuming that the sub-policies are diverse. In this section, we empirically test the validity of our assumption and the quality of our approximation. We run the HiPPO algorithm on Ant Gather and Snake Gather both from scratch and with given pretrained skills, as done in the previous section. In Table 2, we report the average maximum probability under other sub-policies, corresponding to $\epsilon$ from the assumption. In all settings, this is on the order of 0.1. Therefore, with the $p\approx 10$ that we use in our experiments, the term we neglect carries a factor $\epsilon^{p - 1} \approx 10^{-9}$ . It is not surprising then that the average cosine similarity between the full gradient and our approximation is almost 1, as reported in Table 2.
246
+
247
+ <table><tr><td>Gather</td><td>Algorithm</td><td>Cosine Sim.</td><td>\( \max_{z&#x27; \neq z_{kp}} \pi_{\theta_t}(a_t|s_t,z&#x27;) \)</td><td>\( \pi_{\theta_t}(a_t|s_t,z_{kp}) \)</td></tr><tr><td rowspan="2">Snake</td><td>HiPPO on given skills</td><td>0.98 ± 0.01</td><td></td><td>0.44 ± 0.03</td></tr><tr><td>HiPPO on random skills</td><td>0.97 ± 0.03</td><td></td><td>0.32 ± 0.04</td></tr><tr><td rowspan="2">Ant</td><td>HiPPO on given skills</td><td>0.96 ± 0.04</td><td></td><td>0.40 ± 0.08</td></tr><tr><td>HiPPO on random skills</td><td>0.94 ± 0.03</td><td></td><td>0.31 ± 0.09</td></tr></table>
248
+
249
+ Table 2: Empirical evaluation of Lemma 1. In the middle and right columns, we evaluate the quality of our assumption by computing the largest probability of a certain action under other skills $(\epsilon)$ , and the action probability under the actual latent. We also report the cosine similarity between our approximate gradient and the exact gradient from Eq. 3. The mean and standard deviation of these values are computed over the full batch collected at iteration 10.
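+
+ The probability statistics of Table 2 can be computed with a short check (a sketch; `logp_all`, holding each step's action log-probability under every skill, is an assumed input):
+
+ ```python
+ import torch
+
+ def diversity_stats(logp_all, z):
+     # logp_all: [T, n] tensor of log pi_l(a_t | s_t, z_j) for all skills j
+     # z:        [T] index of the skill actually active at each step
+     T = logp_all.shape[0]
+     probs = logp_all.exp()
+     active = probs[torch.arange(T), z]          # pi_l(a_t | s_t, z_kp)
+     others = probs.clone()
+     others[torch.arange(T), z] = float('-inf')  # mask out the active skill
+     eps = others.max(dim=1).values              # max prob under other skills
+     return active.mean(), eps.mean()
+ ```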
250
+
251
+ # 6 CONCLUSIONS AND FUTURE WORK
252
+
253
+ In this paper, we examined how to effectively adapt temporal hierarchies. We began by deriving a hierarchical policy gradient and its approximation. We then proposed a new method, HiPPO, that can stably train multiple layers of a hierarchy jointly. The adaptation experiments suggest that we can optimize pretrained skills for downstream environments, and learn emergent skills without any unsupervised pre-training. We also demonstrate that HiPPO with randomized period can learn from scratch on sparse-reward and long time horizon tasks, while outperforming non-hierarchical methods on zero-shot transfer.
254
+
255
+ # REFERENCES
256
+
257
+ Jacob Andreas, Dan Klein, and Sergey Levine. Modular Multitask Reinforcement Learning with Policy Sketches. International Conference in Machine Learning, 2017. URL http://github.com/.
258
+ Pierre-Luc Bacon, Jean Harb, and Doina Precup. The Option-Critic Architecture. AAAI, pp. 1726-1734, 2017. URL http://arxiv.org/abs/1609.05140.
259
+ Christian Daniel, Herke van Hoof, Jan Peters, and Gerhard Neumann. Probabilistic inference for determining options in reinforcement learning. Machine Learning, 104, 2016. doi: 10.1007/s10994-016-5580-x.
260
+ Peter Dayan and Geoffrey E. Hinton. Feudal Reinforcement Learning. Advances in Neural Information Processing Systems, pp. 271-278, 1993. ISSN 0143991X. doi: 10.1108/IR-08-2017-0143. URL http://www.cs.toronto.edu/~fritz/absps/dh93.pdf.
261
+ Yan Duan, Xi Chen, John Schulman, and Pieter Abbeel. Benchmarking Deep Reinforcement Learning for Continuous Control. International Conference in Machine Learning, 2016. URL http://arxiv.org/abs/1604.06778.
262
+ Benjamin Eysenbach, Abhishek Gupta, Julian Ibarz, and Sergey Levine. Diversity is All You Need: Learning Skills without a Reward Function. International Conference in Learning Representations, 2019. URL http://arxiv.org/abs/1802.06070.
263
+ Carlos Florensa, Yan Duan, and Pieter Abbeel. Stochastic Neural Networks for Hierarchical Reinforcement Learning. International Conference in Learning Representations, pp. 1-17, 2017a. ISSN 14779129. doi: 10.1002/rcm.765. URL http://arxiv.org/abs/1704.03012.
264
+ Carlos Florensa, David Held, Markus Wulfmeier, Michael Zhang, and Pieter Abbeel. Reverse Curriculum Generation for Reinforcement Learning. Conference on Robot Learning, pp. 1-16, 2017b. ISSN 1938-7228. doi: 10.1080/00908319208908727. URL http://arxiv.org/abs/1707.05300.
265
+ Carlos Florensa, Jonas Degrave, Nicolas Heess, Jost Tobias Springenberg, and Martin Riedmiller. Self-supervised Learning of Image Embedding for Continuous Control. In Workshop on Inference to Control at NeurIPS, 2018. URL http://arxiv.org/abs/1901.00943.
266
+ Kevin Frans, Jonathan Ho, Xi Chen, Pieter Abbeel, and John Schulman. Meta Learning Shared Hierarchies. International Conference in Learning Representations, pp. 1-11, 2018. ISSN 14639076. doi: 10.1039/b203755f. URL http://arxiv.org/abs/1710.09767.
267
+ Mohammad Ghavamzadeh and Sridhar Mahadevan. Hierarchical Policy Gradient Algorithms. International Conference in Machine Learning, 2003.
268
+ Tuomas Haarnoja, Kristian Hartikainen, Pieter Abbeel, and Sergey Levine. Latent Space Policies for Hierarchical Reinforcement Learning. International Conference in Machine Learning, 2018. URL http://arxiv.org/abs/1804.02808.
269
+ Jean Harb, Pierre-Luc Bacon, Martin Klissarov, and Doina Precup. When Waiting is not an Option : Learning Options with a Deliberation Cost. AAAI, 9 2017. URL http://arxiv.org/abs/ 1709.04571.
270
+ Karol Hausman, Jost Tobias Springenberg, Ziyu Wang, Nicolas Heess, and Martin Riedmiller. Learning an Embedding Space for Transferable Robot Skills. International Conference in Learning Representations, pp. 1-16, 2018.
271
+ Nicolas Heess, Greg Wayne, Yuval Tassa, Timothy Lillicrap, Martin Riedmiller, David Silver, and Google Deepmind. Learning and Transfer of Modulated Locomotor Controllers. 2016. URL https://arxiv.org/abs/1610.05182.
272
+
273
+ Nicolas Heess, Dhruva TB, Srinivasan Sriram, Jay Lemmon, Josh Merel, Greg Wayne, Yuval Tassa, Tom Erez, Ziyu Wang, S. M. Ali Eslami, Martin Riedmiller, and David Silver. Emergence of Locomotion Behaviours in Rich Environments. 7 2017. URL http://arxiv.org/abs/1707.02286.
274
+ Sham Kakade. A Natural Policy Gradient. Advances in Neural Information Processing Systems, 2002.
275
+ Tejas D Kulkarni, Karthik R Narasimhan, Ardavan Saeedi CSAIL, and Joshua B Tenenbaum BCS. Hierarchical Deep Reinforcement Learning: Integrating Temporal Abstraction and Intrinsic Motivation. Advances in Neural Information Processing Systems, pp. 1-13, 2016.
276
+ Hoang M Le, Nan Jiang, Alekh Agarwal, Miroslav Dudík, Yisong Yue, and Hal Daumé III. Hierarchical Imitation and Reinforcement Learning. International Conference in Machine Learning, 2018.
277
+ Andrew Levy, Robert Platt, and Kate Saenko. Hierarchical Actor-Critic. arXiv:1712.00948, 12 2017. URL http://arxiv.org/abs/1712.00948.
278
+ Andrew Levy, Robert Platt, and Kate Saenko. Hierarchical Reinforcement Learning with Hindsight. International Conference on Learning Representations, 5 2019. URL http://arxiv.org/abs/1805.08180.
279
+ Josh Merel, Arun Ahuja, Vu Pham, Saran Tunyasuvunakool, Siqi Liu, Dhruva Tirumala, Nicolas Heess, and Greg Wayne. Hierarchical visuomotor control of humanoids. International Conference in Learning Representations, 2019. URL http://arxiv.org/abs/1811.09656.
280
+ Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, Stig Petersen, Charles Beattie, Amir Sadik, Ioannis Antonoglou, Helen King, Dharshan Kumaran, Daan Wierstra, Shane Legg, and Demis Hassabis. Human-level control through deep reinforcement learning. Nature, 518(7540):529-533, 2015.
281
+ Ofir Nachum, Honglak Lee, Shane Gu, and Sergey Levine. Data-Efficient Hierarchical Reinforcement Learning. Advances in Neural Information Processing Systems, 2018.
282
+ Ashvin Nair, Vitchyr Pong, Murtaza Dalal, Shikhar Bahl, Steven Lin, and Sergey Levine. Visual Reinforcement Learning with Imagined Goals. Advances in Neural Information Processing Systems, 2018.
283
+ OpenAI, Marcin Andrychowicz, Bowen Baker, Maciek Chociej, Rafal Jozefowicz, Bob McGrew, Jakub Pachocki, Arthur Petron, Matthias Plappert, Glenn Powell, and Alex Ray. Learning Dexterous In-Hand Manipulation. pp. 1-27, 2018.
284
+ Xue Bin Peng, Michael Chang, Grace Zhang, Pieter Abbeel, and Sergey Levine. MCP: Learning Composable Hierarchical Control with Multiplicative Compositional Policies. 5 2019. URL http://arxiv.org/abs/1905.09808.
285
+ Jan Peters and Stefan Schaal. Natural Actor-Critic. Neurocomputing, 71(7-9):1180-1190, 2008. ISSN 09252312. doi: 10.1016/j.neucom.2007.11.026.
286
+ Doina Precup. Temporal abstraction in reinforcement learning, 1 2000. URL https:// scholarworks.umass.edu/dissertations/AAI9978540.
287
+ Pravesh Ranchod, Benjamin Rosman, and George Konidaris. Nonparametric Bayesian Reward Segmentation for Skill Discovery Using Inverse Reinforcement Learning. 2015. ISSN 21530866. doi: 10.1109/IROS.2015.7353414.
288
+ John Schulman, Philipp Moritz, Michael Jordan, and Pieter Abbeel. Trust Region Policy Optimization. International Conference in Machine Learning, 2015.
289
+ John Schulman, Philipp Moritz, Sergey Levine, Michael I Jordan, and Pieter Abbeel. High-Dimensional Continuous Control Using Generalized Advantage Estimation. International Conference in Learning Representations, pp. 1-14, 2016.
290
+
291
+ John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal Policy Optimization Algorithms. 2017. URL https://openai-public.s3-us-west-2.amazon.com/blog/2017-07/ppo/ppo-arxiv.pdf.
292
+ Arjun Sharma, Mohit Sharma, Nicholas Rhinehart, and Kris M Kitani. Directed-Info GAIL: Learning Hierarchical Policies from Unsegmented Demonstrations using Directed Information. International Conference in Learning Representations, 2018. URL http://arxiv.org/abs/1810.01266.
293
+ Tianmin Shu, Caiming Xiong, and Richard Socher. Hierarchical and interpretable skill acquisition in multi-task reinforcement Learning. International Conference in Learning Representations, 3:1-13, 2018. doi: 10.1109/MWC.2016.7553036.
294
+ David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, Yutian Chen, Timothy Lillicrap, Fan Hui, Laurent Sifre, George Van Den Driessche, Thore Graepel, and Demis Hassabis. Mastering the game of Go without human knowledge. Nature, 550(7676):354-359, 10 2017. ISSN 14764687. doi: 10.1038/nature24270. URL http://arxiv.org/abs/1610.00633.
295
+ Matthew J. A. Smith, Herke van Hoof, and Joelle Pineau. An inference-based policy gradient method for learning options, 2 2018. URL https://openreview.net/forum?id=rJIgf7bAZ.
296
+ Sungryull Sohn, Junhyuk Oh, and Honglak Lee. Multitask Reinforcement Learning for Zero-shot Generalization with Subtask Dependencies. Advances in Neural Information Processing Systems, 2018.
297
+ Richard S Sutton, Doina Precup, and Satinder Singh. Between MDPs and semi-MDPs: A framework for temporal abstraction in reinforcement learning. Artificial Intelligence, 112: 181-211, 1999. URL http://www-anw.cs.umass.edu/~barto/courses/cs687/Sutton-Precup-Singh-AIJ99.pdf.
298
+ Yichuan Tang and Ruslan Salakhutdinov. Learning Stochastic Feedforward Neural Networks. Advances in Neural Information Processing Systems, 2:530-538, 2013. doi: 10.1.1.63.1777.
299
+ Emanuel Todorov, Tom Erez, and Yuval Tassa. MuJoCo: A physics engine for model-based control. pp. 5026-5033, 2012.
300
+ George Tucker, Surya Bhupatiraju, Shixiang Gu, Richard E Turner, Zoubin Ghahramani, and Sergey Levine. The Mirage of Action-Dependent Baselines in Reinforcement Learning. International Conference in Machine Learning, 2018. URL http://arxiv.org/abs/1802.10031.
301
+ Alexander Vezhnevets, Volodymyr Mnih, John Agapiou, Simon Osindero, Alex Graves, Oriol Vinyals, and Koray Kavukcuoglu. Strategic Attentive Writer for Learning Macro-Actions. Advances in Neural Information Processing Systems, 2016.
302
+ Alexander Sasha Vezhnevets, Simon Osindero, Tom Schaul, Nicolas Heess, Max Jaderberg, David Silver, and Koray Kavukcuoglu. Feudal Networks for Hierarchical Reinforcement Learning. International Conference in Machine Learning, 2017. URL https://arxiv.org/pdf/1703.01161.pdf.
303
+ Théophane Weber, Nicolas Heess, Lars Buesing, and David Silver. Credit Assignment Techniques in Stochastic Computation Graphs. 1 2019. URL http://arxiv.org/abs/1901.01761.
304
+ Ronald J Williams. Simple Statistical Gradient-Following Algorithms for Connectionist Reinforcement Learning. Machine Learning, 8(3-4):229-256, 1992.
305
+
306
+ # A HYPERPARAMETERS AND ARCHITECTURES
307
+
308
+ The Block environments used a horizon of 1000 and a batch size of 50,000, while Gather used a batch size of 100,000. Ant Gather has a horizon of 5000, while Snake Gather has a horizon of 8000 due to its larger size. For all experiments, both PPO and HiPPO used learning rate $3 \times 10^{-3}$ , clipping parameter $\epsilon = 0.1$ , 10 gradient updates per iteration, and discount $\gamma = 0.999$ . The learning rate, clipping parameter, and number of gradient updates come from the OpenAI Baselines implementation.
309
+
310
+ HiPPO used $n = 6$ sub-policies, with a manager network of 2 hidden layers of 32 units and a skill network of 2 hidden layers of 64 units. In order to have roughly the same number of parameters for each algorithm, flat PPO uses a network with 2 hidden layers of 256 and 64 units respectively. For HiPPO with randomized period, we resample $p \sim \mathrm{Uniform}\{5,15\}$ every time the manager network outputs a latent, and provide the number of timesteps until the next latent selection as an input to both the manager and skill networks. The single baselines and skill-dependent baselines used an MLP with 2 hidden layers of 32 units to fit the value function. The skill-dependent baseline receives, in addition to the full observation, the active latent code and the time remaining until the next skill sampling. All runs used five random seeds.
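+
+ For reference, the hyperparameters above consolidated into a single illustrative config (a sketch only, not our exact training code):
+
+ ```python
+ HIPPO_CONFIG = {
+     "learning_rate": 3e-3,
+     "clip_eps": 0.1,
+     "grad_updates_per_iter": 10,
+     "discount": 0.999,
+     "n_skills": 6,
+     "manager_hidden": (32, 32),  # manager network layers
+     "skill_hidden": (64, 64),    # skill network layers
+     "period_range": (5, 15),     # p ~ Uniform{5, ..., 15}
+     "batch_size": {"block": 50_000, "gather": 100_000},
+     "n_seeds": 5,
+ }
+ ```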
311
+
312
+ # B ROBOT AGENT DESCRIPTION
313
+
314
+ Hopper is a 3-link robot with a 14-dimensional observation space and a 3-dimensional action space. Half-Cheetah has a 20-dimensional observation space and a 6-dimensional action space. We evaluate both of these agents on a sparse block hopping task. In addition to observing their own joint angles and positions, they observe the height and length of the next wall, the x-position of the next wall, and the distance to the wall from the agent. We also provide the same wall observations for the previous wall, which the agent can still interact with.
315
+
316
+ Snake is a 5-link robot with a 17-dimensional observation space and a 4-dimensional action space. Ant is a quadrupedal robot with a 27-dimensional observation space and an 8-dimensional action space. Both Ant and Snake can move and rotate in all directions, and Ant faces the added challenge of avoiding falling over irrecoverably. In the Gather environment, agents also receive 2 sets of 10-dimensional lidar observations, corresponding to separate apple and bomb readings: each $36^{\circ}$ bin gives the distance to the nearest apple or bomb, respectively. All environments are simulated with the physics engine MuJoCo (Todorov et al., 2012).
317
+
318
+ # C PROOFS
319
+
320
+ Lemma 1. If the skills are sufficiently differentiated, then the latent variable can be treated as part of the observation to compute the gradient of the trajectory probability. Concretely, if $\pi_{\theta_h}(z|s)$ and $\pi_{\theta_l}(a|s,z)$ are Lipschitz in their parameters, and $0 < \pi_{\theta_l}(a_t|s_t,z_j) < \epsilon$ for all $z_j \neq z_{kp}$ , then
321
+
322
+ $$
323
+ \nabla_ {\theta} \log P (\tau) = \sum_ {k = 0} ^ {H / p} \nabla_ {\theta} \log \pi_ {\theta_ {h}} \left(z _ {k p} \mid s _ {k p}\right) + \sum_ {t = 1} ^ {p} \nabla_ {\theta} \log \pi_ {\theta_ {l}} \left(a _ {t} \mid s _ {t}, z _ {k p}\right) + \mathcal {O} (n H \epsilon^ {p - 1}) \tag {5}
324
+ $$
325
+
326
+ Proof. From the point of view of the MDP, a trajectory is a sequence $\tau = (s_0, a_0, s_1, a_1, \ldots, a_{H-1}, s_H)$ . Let's assume we use the hierarchical policy introduced above, with a higher-level policy modeled as a parameterized discrete distribution with $n$ possible outcomes $\pi_{\theta_h}(z|s) = \text{Categorical}_{\theta_h}(n)$ . We can expand $P(\tau)$ into the product of policy and environment dynamics terms, with $z_j$ denoting the $j$ th possible value out of the $n$ choices,
327
+
328
+ $$
329
+ P (\tau) = \Bigg (\prod_ {k = 0} ^ {H / p} \Big [ \sum_ {j = 1} ^ {n} \pi_ {\theta_ {h}} (z _ {j} | s _ {k p}) \prod_ {t = k p} ^ {(k + 1) p - 1} \pi_ {\theta_ {l}} (a _ {t} | s _ {t}, z _ {j}) \Big ] \Bigg) \Bigg [ P (s _ {0}) \prod_ {t = 1} ^ {H} P (s _ {t + 1} | s _ {t}, a _ {t}) \Bigg ]
330
+ $$
331
+
332
+ Taking the gradient of $\log P(\tau)$ with respect to the policy parameters $\theta = [\theta_h, \theta_l]$ , the dynamics terms disappear, leaving:
333
+
334
+ $$
335
+ \begin{array}{l} \nabla_{\theta} \log P(\tau) = \sum_{k = 0}^{H / p} \nabla_{\theta} \log \left( \sum_{j = 1}^{n} \pi_{\theta_{h}}(z_{j} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j}) \right) \\ = \sum_{k = 0}^{H / p} \frac{1}{\sum_{j = 1}^{n} \pi_{\theta_{h}}(z_{j} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j})} \sum_{j = 1}^{n} \nabla_{\theta} \left( \pi_{\theta_{h}}(z_{j} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j}) \right) \end{array}
336
+ $$
337
+
338
+ The sum over possible values of $z$ prevents the logarithm from splitting the product over the $p$ -step sub-trajectories. This term is problematic, as this product quickly approaches 0 as $p$ increases, and suffers from considerable numerical instabilities. Instead, we want to approximate this sum of products by a single one of the terms, which can then be decomposed into a sum of logs. For this we study each of the terms in the sum: the gradient of a sub-trajectory probability under a specific latent $\nabla_{\theta}\Big(\pi_{\theta_h}(z_j|s_{kp})\prod_{t = kp}^{(k + 1)p - 1}\pi_{\theta_l}(a_t|s_t,z_j)\Big)$ . Now we can use the assumption that the skills are easy to distinguish, $0 < \pi_{\theta_l}(a_t|s_t,z_j) < \epsilon \forall j\neq kp$ . Therefore, the probability of the sub-trajectory under a latent different than the one that was originally sampled $z_{j}\neq z_{kp}$ , is upper bounded by $\epsilon^p$ . Taking the gradient, applying the product rule, and the Lipschitz continuity of the policies, we obtain that for all $z_{j}\neq z_{kp}$
339
+
340
+ $$
341
+ \begin{array}{l} \nabla_{\theta} \Big( \pi_{\theta_{h}}(z_{j} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j}) \Big) = \nabla_{\theta} \pi_{\theta_{h}}(z_{j} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j}) \\ + \sum_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{h}}(z_{j} | s_{kp}) \big( \nabla_{\theta} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{j}) \big) \prod_{\substack{t' = kp \\ t' \neq t}}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t'} | s_{t'}, z_{j}) \\ = \mathcal{O}(p \epsilon^{p - 1}) \end{array}
342
+ $$
343
+
344
+ Thus, we can across the board replace the summation over latents by the single term corresponding to the latent that was sampled at that time.
345
+
346
+ $$
347
+ \begin{array}{l} \nabla_{\theta} \log P(\tau) = \sum_{k = 0}^{H / p} \frac{1}{\pi_{\theta_{h}}(z_{kp} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp})} \nabla_{\theta} \Big( \pi_{\theta_{h}}(z_{kp} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp}) \Big) + \frac{nH}{p} \mathcal{O}(p \epsilon^{p - 1}) \\ = \sum_{k = 0}^{H / p} \nabla_{\theta} \log \left( \pi_{\theta_{h}}(z_{kp} | s_{kp}) \prod_{t = kp}^{(k + 1) p - 1} \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp}) \right) + \mathcal{O}(n H \epsilon^{p - 1}) \\ = \sum_{k = 0}^{H / p} \nabla_{\theta} \log \pi_{\theta_{h}}(z_{kp} | s_{kp}) + \sum_{t = 0}^{H} \nabla_{\theta} \log \pi_{\theta_{l}}(a_{t} | s_{t}, z_{kp}) + \mathcal{O}(n H \epsilon^{p - 1}) \end{array}
348
+ $$
349
+
350
+ Interestingly, this is exactly $\nabla_{\theta}\log P(s_0,z_0,a_0,s_1,\ldots)$ . In other words, it is the gradient of the log-probability of the trajectory where the latent variables $z$ are treated as observed.
351
+
352
+ ![](images/0f0685675e0bc1c6fb0ee47f5013b3d68b1af691f526769a79f1781fbdbd917d.jpg)
353
+
354
+ Lemma 2. For any functions $b_{h}:\mathcal{S}\to \mathbb{R}$ and $b_{l}:\mathcal{S}\times \mathcal{Z}\rightarrow \mathbb{R}$ we have:
355
+
356
+ $$
357
+ \begin{array}{l} \mathbb {E} _ {\tau} [ \sum_ {k = 0} ^ {H / p} \nabla_ {\theta} \log P (z _ {k p} | s _ {k p}) b (s _ {k p}) ] = 0 \\ \mathbb {E} _ {\tau} \left[ \sum_ {t = 0} ^ {H} \nabla_ {\theta} \log \pi_ {\theta_ {l}} \left(a _ {t} \mid s _ {t}, z _ {k p}\right) b \left(s _ {t}, z _ {k p}\right) \right] = 0 \\ \end{array}
358
+ $$
359
+
360
+ Proof. We can use the tower property as well as the fact that the interior expression only depends on $s_{kp}$ and $z_{kp}$ :
361
+
362
+ $$
363
+ \begin{array}{l} \mathbb {E} _ {\tau} [ \sum_ {k = 0} ^ {H / p} \nabla_ {\theta} \log P (z _ {k p} | s _ {k p}) b (s _ {k p}) ] = \sum_ {k = 0} ^ {H / p} \mathbb {E} _ {s _ {k p}, z _ {k p}} [ \mathbb {E} _ {\tau \backslash s _ {k p}, z _ {k p}} [ \nabla_ {\theta} \log P (z _ {k p} | s _ {k p}) b (s _ {k p}) ] ] \\ = \sum_ {k = 0} ^ {H / p} \mathbb {E} _ {s _ {k p}, z _ {k p}} [ \nabla_ {\theta} \log P (z _ {k p} | s _ {k p}) b (s _ {k p}) ] \\ \end{array}
364
+ $$
365
+
366
+ Then, we can write out the definition of the expectation and undo the gradient-log trick to prove that the baseline is unbiased.
367
+
368
+ $$
369
+ \begin{array}{l} \mathbb {E} _ {\tau} \left[ \sum_ {k = 0} ^ {H / p} \nabla_ {\theta} \log \pi_ {\theta_ {h}} \left(z _ {k p} \mid s _ {k p}\right) b \left(s _ {k p}\right) \right] = \sum_ {k = 0} ^ {H / p} \int_ {\left(s _ {k p}, z _ {k p}\right)} P \left(s _ {k p}, z _ {k p}\right) \nabla_ {\theta} \log \pi_ {\theta_ {h}} \left(z _ {k p} \mid s _ {k p}\right) b \left(s _ {k p}\right) d z _ {k p} d s _ {k p} \\ = \sum_ {k = 0} ^ {H / p} \int_ {s _ {k p}} P (s _ {k p}) b (s _ {k p}) \int_ {z _ {k p}} \pi_ {\theta_ {h}} (z _ {k p} | s _ {k p}) \nabla_ {\theta} \log \pi_ {\theta_ {h}} (z _ {k p} | s _ {k p}) d z _ {k p} d s _ {k p} \\ = \sum_ {k = 0} ^ {H / p} \int_ {s _ {k p}} P (s _ {k p}) b (s _ {k p}) \int_ {z _ {k p}} \pi_ {\theta_ {h}} (z _ {k p} | s _ {k p}) \frac {1}{\pi_ {\theta_ {h}} (z _ {k p} | s _ {k p})} \nabla_ {\theta} \pi_ {\theta_ {h}} (z _ {k p} | s _ {k p}) d z _ {k p} d s _ {k p} \\ = \sum_ {k = 0} ^ {H / p} \int_ {s _ {k p}} P (s _ {k p}) b (s _ {k p}) \nabla_ {\theta} \int_ {z _ {k p}} \pi_ {\theta_ {h}} (z _ {k p} | s _ {k p}) d z _ {k p} d s _ {k p} \\ = \sum_ {k = 0} ^ {H / p} \int_ {s _ {k p}} P \left(s _ {k p}\right) b \left(s _ {k p}\right) \nabla_ {\theta} 1 d s _ {k p} \\ = 0 \\ \end{array}
370
+ $$
371
+
372
+ ![](images/7ceffe540c4177d2a3158a7a92ee210f0b4e8ef131f0459f8ac358dceeff04ee.jpg)
373
+
374
+ Subtracting a state- and subpolicy-dependent baseline from the second term is also unbiased, i.e.
375
+
376
+ $$
377
+ \mathbb{E}_{\tau} \Big[ \sum_{t=0}^{H} \nabla_{\theta} \log \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \, b_l(s_t, z_{kp}) \Big] = 0
378
+ $$
379
+
380
+ We'll follow the same strategy to prove the second equality: apply the tower property, express the expectation as an integral, and undo the gradient-log trick.
381
+
382
+ $$
383
+ \begin{array}{l} \mathbb{E}_{\tau} \Big[ \sum_{t=0}^{H} \nabla_{\theta} \log \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \, b_l(s_t, z_{kp}) \Big] \\ = \sum_{t=0}^{H} \mathbb{E}_{s_t, a_t, z_{kp}} \Big[ \mathbb{E}_{\tau \backslash s_t, a_t, z_{kp}} \big[ \nabla_{\theta} \log \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \, b_l(s_t, z_{kp}) \big] \Big] \\ = \sum_{t=0}^{H} \mathbb{E}_{s_t, a_t, z_{kp}} \big[ \nabla_{\theta} \log \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \, b_l(s_t, z_{kp}) \big] \\ = \sum_{t=0}^{H} \int_{(s_t, z_{kp})} P(s_t, z_{kp}) \, b_l(s_t, z_{kp}) \int_{a_t} \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \nabla_{\theta} \log \pi_{\theta_l}(a_t \mid s_t, z_{kp}) \, da_t \, dz_{kp} \, ds_t \\ = \sum_{t=0}^{H} \int_{(s_t, z_{kp})} P(s_t, z_{kp}) \, b_l(s_t, z_{kp}) \nabla_{\theta} 1 \, dz_{kp} \, ds_t \\ = 0 \end{array}
384
+ $$
385
+
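+ To make Lemma 2 concrete, here is a minimal Monte Carlo sanity check (our illustration, not part of the paper) that a score function multiplied by a baseline has zero expectation. We use a categorical stand-in for the high-level policy $\pi_{\theta_h}(z|s)$ parametrized by logits, for which $\nabla_{\theta}\log \pi_{\theta}(z) = \mathrm{onehot}(z) - \pi_{\theta}$ in closed form; the logits, baseline value and sample count are arbitrary choices.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ theta = rng.normal(size=5)                # logits of a categorical pi_theta(z | s)
+ pi = np.exp(theta) / np.exp(theta).sum()  # softmax probabilities
+ b = 3.7                                   # an arbitrary baseline value b_h(s)
+
+ n = 200_000
+ z = rng.choice(5, size=n, p=pi)           # sample z ~ pi_theta(. | s)
+ score = np.eye(5)[z] - pi                 # grad_theta log pi_theta(z) per sample
+ print((score * b).mean(axis=0))           # ~0 per component, up to O(1/sqrt(n)) noise
+ ```
+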
386
+ ![](images/29a8de46a1a13c72e71ef2c0a1ee1ee7aad9d4dc8f0bbe3244122fa12c118240.jpg)
387
+ Figure 7: HIRO performance on Ant Gather with and without access to the ground truth $(x, y)$ , which it needs to communicate useful goals.
388
+
389
+ # D HIRO SENSITIVITY TO OBSERVATION-SPACE
390
+
391
+ In this section we provide a more detailed explanation of why HIRO (Nachum et al., 2018) performs poorly in our environments. As explained in our related work section, HIRO belongs to the general category of algorithms that train goal-reaching policies as lower levels of the hierarchy (Vezhnevets et al., 2017; Levy et al., 2017). These methods rely on having a goal-space that is meaningful for the task at hand. For example, in navigation tasks they require access to the $(x,y)$ position of the agent so that deltas in that space can be given as meaningful goals for moving in the environment. Unfortunately, in many cases the only readily available information (when there is no GPS signal or other positioning system installed) consists of raw sensory inputs, like cameras or the LIDAR sensors we mimic in our environments. In such cases, our method still performs well because it does not rely on the extra goal-reaching supervision that HIRO and similar methods leverage, and which is detrimental in this setting. In Figure 7, we show that knowing the ground-truth location is critical for HIRO's success. We have reproduced the HIRO results in Fig. 7 using the published codebase, so we are convinced that our results showcase a failure mode of HIRO.
392
+
393
+ # E HYPERPARAMETER SENSITIVITY PLOTS
394
+
395
+ ![](images/21bd1eb45a806666f518175cb930dc9e23093ee5f8583dc07060b2ecf4dda6b6.jpg)
396
+ Figure 8: Sensitivity of HiPPO to variation in the time-commitment.
397
+
398
+ ![](images/ba2a1e30228d16da83f12b4240047b7d46773ef33cb0375345dc7deff0651c95.jpg)
399
+
400
+ ![](images/8a50d8d2eb03fb965b0310a137a4359944c16493cf8fca4182555876111ba434.jpg)
401
+ Figure 9: Sensitivity of HiPPO to variation in the number of skills.
402
+
403
+ ![](images/ea8a1eb1caa0ba0c60df1c9ea5b14f0eb2f6546de6fae91ecb971dfb634453d8.jpg)
subpolicyadaptationforhierarchicalreinforcementlearning/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f6d5b58273cdda6aea7b593f639f519c4aacf123c39a1e15c203c509a02533c
3
+ size 754038
subpolicyadaptationforhierarchicalreinforcementlearning/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34fe64aa32d668cf04d4f78a34b0f4ebe98ef19023462530f6bc7bbf59fae07f
3
+ size 487253
symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abfd370611980f1d0bf17e42f0bab2ad7158efcde5af0f855127189fdd2e28de
3
+ size 128754
symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a6a4cc537eee86881fdbd5e5a25899a86a7b0b3a3ff5acd2466beee3ad9c2e40
3
+ size 151402
symplecticodenetlearninghamiltoniandynamicswithcontrol/7de5f77e-81be-4e86-af97-52c5a4dc0226_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f432358e4bb2e01790cf1864022b93295d7150ddd3bcaa0d507dffac36a95bce
3
+ size 664675
symplecticodenetlearninghamiltoniandynamicswithcontrol/full.md ADDED
@@ -0,0 +1,604 @@
1
+ # SYMPLECTIC ODE-NET: LEARNING HAMILTONIAN DYNAMICS WITH CONTROL
2
+
3
+ Yaofeng Desmond Zhong*
4
+
5
+ Princeton University
6
+
7
+ y.zhong@princeton.edu
8
+
9
+ Biswadip Dey
10
+
11
+ Siemens Corporate Technology
12
+
13
+ biswadip.dey@siemens.com
14
+
15
+ Amit Chakraborty
16
+
17
+ Siemens Corporate Technology
18
+
19
+ amit.chakraborty@siemens.com
20
+
21
+ # ABSTRACT
22
+
23
+ In this paper, we introduce Symplectic $^1$ ODE-Net (SymODEN), a deep learning framework which can infer the dynamics of a physical system, given by an ordinary differential equation (ODE), from observed state trajectories. To achieve better generalization with fewer training samples, SymODEN incorporates appropriate inductive bias by designing the associated computation graph in a physics-informed manner. In particular, we enforce Hamiltonian dynamics with control to learn the underlying dynamics in a transparent way, which can then be leveraged to draw insight about relevant physical aspects of the system, such as mass and potential energy. In addition, we propose a parametrization which can enforce this Hamiltonian formalism even when the generalized coordinate data is embedded in a high-dimensional space or we can only access velocity data instead of generalized momentum. This framework, by offering interpretable, physically-consistent models for physical systems, opens up new possibilities for synthesizing model-based control strategies.
24
+
25
+ # 1 INTRODUCTION
26
+
27
+ In recent years, deep neural networks (Goodfellow et al., 2016) have become very accurate and widely used in many application domains, such as image recognition (He et al., 2016), language comprehension (Devlin et al., 2019), and sequential decision making (Silver et al., 2017). To learn underlying patterns from data and enable generalization beyond the training set, the learning approach incorporates appropriate inductive bias (Haussler, 1988; Baxter, 2000) by promoting representations which are simple in some sense. It typically manifests itself via a set of assumptions, which in turn can guide a learning algorithm to pick one hypothesis over another. The success in predicting an outcome for previously unseen data then depends on how well the inductive bias captures the ground reality. Inductive bias can be introduced as the prior in a Bayesian model, or via the choice of computation graphs in a neural network.
28
+
29
+ In a variety of settings, especially in physical systems, wherein laws of physics are primarily responsible for shaping the outcome, generalization in neural networks can be improved by leveraging underlying physics for designing the computation graphs. Here, by leveraging a generalization of the Hamiltonian dynamics, we develop a learning framework which exploits the underlying physics in the associated computation graph. Our results show that incorporation of such physics-based inductive bias offers insight about relevant physical properties of the system, such as inertia, potential energy, total conserved energy. These insights, in turn, enable a more accurate prediction of future behavior and improvement in out-of-sample behavior. Furthermore, learning a physically-consistent model of the underlying dynamics can subsequently enable usage of model-based controllers which can provide performance guarantees for complex, nonlinear systems. In particular, insight about
30
+
31
+ kinetic and potential energy of a physical system can be leveraged to synthesize appropriate control strategies, such as the method of controlled Lagrangian (Bloch et al., 2001) and interconnection & damping assignment (Ortega et al., 2002), which can reshape the closed-loop energy landscape to achieve a broad range of control objectives (regulation, tracking, etc.).
32
+
33
+ # RELATED WORK
34
+
35
+ Physics-based Priors for Learning in Dynamical Systems: The last few years have witnessed a significant interest in incorporating physics-based priors into deep learning frameworks. Such approaches, in contrast to more rigid parametric system identification techniques (Söderström & Stoica, 1988), use neural networks to approximate the state-transition dynamics and therefore are more expressive. Sanchez-Gonzalez et al. (2018), by representing the causal relationships in a physical system as a directed graph, use a recurrent graph network to infer latent space dynamics of robotic systems. Lutter et al. (2019) and Gupta et al. (2019) leverage Lagrangian mechanics to learn the dynamics of kinematic structures from time-series data of position, velocity, and acceleration. A more recent (concurrent) work by Greydanus et al. (2019) uses Hamiltonian mechanics to learn the dynamics of autonomous, energy-conserved mechanical systems from time-series data of position, momentum, and their derivatives. A key difference between these approaches and the proposed one is that our framework does not require any information about higher-order derivatives (e.g., acceleration) and can incorporate external control into the Hamiltonian formalism.
36
+
37
+ Neural Networks for Dynamics and Control: Inferring underlying dynamics from time-series data plays a critical role in controlling closed-loop response of dynamical systems, such as robotic manipulators (Lillicrap et al., 2015) and building HVAC systems (Wei et al., 2017). Although the use of neural networks towards identification and control of dynamical systems dates back to more than three decades ago (Narendra & Parthasarathy, 1990), recent advances in deep neural networks have led to renewed interest in this domain. Watter et al. (2015) learn dynamics with control from high-dimensional observations (raw image sequences) using a variational approach and synthesize an iterative LQR controller to control physical systems by imposing a locally linear constraint. Karl et al. (2016) and Krishnan et al. (2017) adopt a variational approach and use recurrent architectures to learn state-space models from noisy observation. SE3-Nets (Byravan & Fox, 2017) learn $SE(3)$ transformation of rigid bodies from point cloud data. Ayed et al. (2019) use partial information about the system state to learn a nonlinear state-space model. However, this body of work, while attempting to learn state-space models, does not take physics-based priors into consideration.
38
+
39
+ # CONTRIBUTION
40
+
41
+ The main contribution of this work is two-fold. First, we introduce a learning framework called Symplectic ODE-Net (SymODEN) which encodes a generalization of the Hamiltonian dynamics. This generalization, by adding an external control term to the standard Hamiltonian dynamics, allows us to learn the system dynamics which conforms to Hamiltonian dynamics with control. With the learned structured dynamics, we are able to synthesize controllers to control the system to track a reference configuration. Moreover, by encoding the structure, we can achieve better predictions with smaller network sizes. Second, we take one step forward in combining the physics-based prior and the data-driven approach. Previous approaches (Lutter et al., 2019; Greydanus et al., 2019) require data in the form of generalized coordinates and their derivatives up to the second order. However, a large number of physical systems accommodate generalized coordinates which are non-Euclidean (e.g., angles), and such angle data is often obtained in the embedded form, i.e., $(\cos q,\sin q)$ instead of the coordinate $(q)$ itself. The underlying reason is that an angular coordinate lies on $\mathbb{S}^1$ instead of $\mathbb{R}^1$ . In contrast to previous approaches which do not address this aspect, SymODEN has been designed to work with angle data in the embedded form. Additionally, we leverage differentiable ODE solvers to avoid the need for estimating second-order derivatives of generalized coordinates. Code for the SymODEN framework and experiments is available at https://github.com/d-biswa/Symplectic-ODENet.
42
+
43
+ # 2 PRELIMINARY CONCEPTS
44
+
45
+ # 2.1 HAMILTONIAN DYNAMICS
46
+
47
+ Lagrangian dynamics and Hamiltonian dynamics are both reformulations of Newtonian dynamics. They provide novel insights into the laws of mechanics. In these formulations, the configuration of a system is described by its generalized coordinates. Over time, the configuration point of the system moves in the configuration space, tracing out a trajectory. Lagrangian dynamics describes the evolution of this trajectory, i.e., the equations of motion, in the configuration space. Hamiltonian dynamics, however, tracks the change of system states in the phase space, i.e. the product space of generalized coordinates $\mathbf{q} = (q_{1}, q_{2}, \dots, q_{n})$ and generalized momenta $\mathbf{p} = (p_{1}, p_{2}, \dots, p_{n})$ . In other words, Hamiltonian dynamics treats $\mathbf{q}$ and $\mathbf{p}$ on an equal footing. This not only provides symmetric equations of motion but also leads to a whole new approach to classical mechanics (Goldstein et al., 2002). Hamiltonian dynamics is also widely used in statistical and quantum mechanics.
48
+
49
+ In Hamiltonian dynamics, the time-evolution of a system is described by the Hamiltonian $H(\mathbf{q},\mathbf{p})$ , a scalar function of generalized coordinates and momenta. Moreover, in almost all physical systems, the Hamiltonian is the same as the total energy and hence can be expressed as
50
+
51
+ $$
52
+ H (\mathbf {q}, \mathbf {p}) = \frac {1}{2} \mathbf {p} ^ {T} \mathbf {M} ^ {- 1} (\mathbf {q}) \mathbf {p} + V (\mathbf {q}), \tag {1}
53
+ $$
54
+
55
+ where the mass matrix $\mathbf{M}(\mathbf{q})$ is symmetric positive definite and $V(\mathbf{q})$ represents the potential energy of the system. Correspondingly, the time-evolution of the system is governed by
56
+
57
+ $$
58
+ \dot {\mathbf {q}} = \frac {\partial H}{\partial \mathbf {p}} \quad \dot {\mathbf {p}} = - \frac {\partial H}{\partial \mathbf {q}}, \tag {2}
59
+ $$
60
+
61
+ where we have dropped explicit dependence on $\mathbf{q}$ and $\mathbf{p}$ for brevity of notation. Moreover, since
62
+
63
+ $$
64
+ \dot {H} = \left(\frac {\partial H}{\partial \mathbf {q}}\right) ^ {T} \dot {\mathbf {q}} + \left(\frac {\partial H}{\partial \mathbf {p}}\right) ^ {T} \dot {\mathbf {p}} = 0, \tag {3}
65
+ $$
66
+
67
+ the total energy is conserved along a trajectory of the system. The RHS of Equation (2) is called the symplectic gradient (Rowe et al., 1980) of $H$ , and Equation (3) shows that moving along the symplectic gradient keeps the Hamiltonian constant.
68
+
69
+ In this work, we consider a generalization of the Hamiltonian dynamics which provides a means to incorporate external control (u), such as force and torque. As external control is usually affine and only influences changes in the generalized momenta, we can express this generalization as
70
+
71
+ $$
72
+ \left[ \begin{array}{l} \dot {\mathbf {q}} \\ \dot {\mathbf {p}} \end{array} \right] = \left[ \begin{array}{c} \frac {\partial H}{\partial \mathbf {p}} \\ - \frac {\partial H}{\partial \mathbf {q}} \end{array} \right] + \left[ \begin{array}{c} \mathbf {0} \\ \mathbf {g} (\mathbf {q}) \end{array} \right] \mathbf {u}, \tag {4}
73
+ $$
74
+
75
+ where the input matrix $\mathbf{g}(\mathbf{q})$ is typically assumed to have full column rank. For $\mathbf{u} = \mathbf{0}$ , the generalized dynamics reduces to the classical Hamiltonian dynamics (2) and the total energy is conserved; however, when $\mathbf{u} \neq \mathbf{0}$ , the system has a dissipation-free energy exchange with the environment.
76
+
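+ As a concrete illustration (ours, not part of the paper) of Equation (4), the following sketch evaluates the controlled Hamiltonian vector field for a pendulum with $H(q,p) = p^2/(2ml^2) + mgl(1-\cos q)$ and scalar input matrix $\mathbf{g}(\mathbf{q}) = 1$ ; the constants are arbitrary.
+
+ ```python
+ import numpy as np
+
+ m, l, grav = 1.0, 1.0, 9.8  # arbitrary pendulum constants for illustration
+
+ def dH_dq(q, p):
+     return m * grav * l * np.sin(q)   # dH/dq for H = p^2/(2 m l^2) + m g l (1 - cos q)
+
+ def dH_dp(q, p):
+     return p / (m * l ** 2)           # dH/dp
+
+ def controlled_field(q, p, u):
+     q_dot = dH_dp(q, p)               # symplectic gradient, first row of Equation (4)
+     p_dot = -dH_dq(q, p) + 1.0 * u    # control enters only the momentum equation
+     return q_dot, p_dot
+
+ print(controlled_field(0.3, 0.1, u=0.0))  # unforced: moves along a level set of H
+ ```
+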
77
+ # 2.2 CONTROL VIA ENERGY SHAPING
78
+
79
+ Once we have learned the dynamics of a system, the learned model can be used to synthesize a controller for driving the system to a reference configuration $\mathbf{q}^{\star}$ . As the proposed approach offers insight about the energy associated with a system, it is a natural choice to exploit this information for synthesizing controllers via energy shaping (Ortega et al., 2001). As energy is a fundamental aspect of physical systems, reshaping the associated energy landscape enables us to specify a broad range of control objectives and synthesize nonlinear controllers with provable performance guarantees.
80
+
81
+ If $\operatorname{rank}(\mathbf{g}(\mathbf{q})) = \dim(\mathbf{q})$ , the system is fully-actuated and we have control over every dimension of "acceleration" in $\dot{\mathbf{p}}$ . For such fully-actuated systems, a controller $\mathbf{u}(\mathbf{q},\mathbf{p}) = \beta (\mathbf{q}) + \mathbf{v}(\mathbf{p})$ can be synthesized via potential energy shaping $\beta (\mathbf{q})$ and damping injection $\mathbf{v}(\mathbf{p})$ . For completeness, we restate this procedure (Ortega et al., 2001) using our notation. As the name suggests, the goal of potential energy shaping is to synthesize $\beta (\mathbf{q})$ such that the closed-loop system behaves as if its time-evolution is governed by a desired Hamiltonian $H_{d}$ . With this, we have
82
+
83
+ $$
84
+ \left[ \begin{array}{l} \dot {\mathbf {q}} \\ \dot {\mathbf {p}} \end{array} \right] = \left[ \begin{array}{c} \frac {\partial H}{\partial \mathbf {p}} \\ - \frac {\partial H}{\partial \mathbf {q}} \end{array} \right] + \left[ \begin{array}{l} \mathbf {0} \\ \mathbf {g} (\mathbf {q}) \end{array} \right] \beta (\mathbf {q}) = \left[ \begin{array}{c} \frac {\partial H _ {d}}{\partial \mathbf {p}} \\ - \frac {\partial H _ {d}}{\partial \mathbf {q}} \end{array} \right], \tag {5}
85
+ $$
86
+
87
+ where the difference between the desired Hamiltonian and the original one lies in their potential energy term, i.e.
88
+
89
+ $$
90
+ H _ {d} (\mathbf {q}, \mathbf {p}) = \frac {1}{2} \mathbf {p} ^ {T} \mathbf {M} ^ {- 1} (\mathbf {q}) \mathbf {p} + V _ {d} (\mathbf {q}). \tag {6}
91
+ $$
92
+
93
+ In other words, $\beta (\mathbf{q})$ shape the potential energy such that the desired Hamiltonian $H_{d}(\mathbf{q},\mathbf{p})$ has a minimum at $(\mathbf{q}^{\star},\mathbf{0})$ . Then, by substituting Equation (1) and Equation (6) into Equation (5), we get
94
+
95
+ $$
96
+ \boldsymbol {\beta} (\mathbf {q}) = \mathbf {g} ^ {T} \left(\mathbf {g} \mathbf {g} ^ {T}\right) ^ {- 1} \left(\frac {\partial V}{\partial \mathbf {q}} - \frac {\partial V _ {d}}{\partial \mathbf {q}}\right). \tag {7}
97
+ $$
98
+
99
+ Thus, with potential energy shaping, we ensure that the system has the lowest energy at the desired reference configuration. Furthermore, to ensure that trajectories actually converge to this configuration, we add an additional damping term<sup>2</sup> given by
100
+
101
+ $$
102
+ \mathbf {v} (\mathbf {p}) = - \mathbf {g} ^ {T} \left(\mathbf {g} \mathbf {g} ^ {T}\right) ^ {- 1} \left(\mathbf {K} _ {d} \mathbf {p}\right). \tag {8}
103
+ $$
104
+
105
+ However, for underactuated systems, potential energy shaping alone cannot drive the system to a desired configuration. We also need kinetic energy shaping for this purpose (Chang et al., 2002).
106
+
107
+ Remark If the desired potential energy is chosen to be a quadratic of the form
108
+
109
+ $$
110
+ V _ {d} (\mathbf {q}) = \frac {1}{2} \left(\mathbf {q} - \mathbf {q} ^ {\star}\right) ^ {T} \mathbf {K} _ {p} \left(\mathbf {q} - \mathbf {q} ^ {\star}\right), \tag {9}
111
+ $$
112
+
113
+ the external forcing term can be expressed as
114
+
115
+ $$
116
+ \mathbf {u} = \mathbf {g} ^ {T} \left(\mathbf {g} \mathbf {g} ^ {T}\right) ^ {- 1} \left(\frac {\partial V}{\partial \mathbf {q}} - \mathbf {K} _ {p} (\mathbf {q} - \mathbf {q} ^ {\star}) - \mathbf {K} _ {d} \mathbf {p}\right). \tag {10}
117
+ $$
118
+
119
+ This can be interpreted as a PD controller with an additional energy compensation term.
120
+
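+ As a sketch of how Equation (10) could be implemented (our illustration; the function names and gains are ours, and we assume a fully-actuated system with a known $\partial V / \partial \mathbf{q}$ ):
+
+ ```python
+ import numpy as np
+
+ def energy_shaping_controller(q, p, q_star, g, dV_dq, Kp, Kd):
+     """u = g^T (g g^T)^{-1} (dV/dq - Kp (q - q_star) - Kd p), as in Equation (10)."""
+     right_pinv = g.T @ np.linalg.inv(g @ g.T)   # right pseudo-inverse of g(q)
+     return right_pinv @ (dV_dq(q) - Kp @ (q - q_star) - Kd @ p)
+
+ # Example: a pendulum with V(q) = 5 (1 - cos q) and g(q) = 1, driven to q* = pi.
+ u = energy_shaping_controller(q=np.array([0.1]), p=np.array([0.0]),
+                               q_star=np.array([np.pi]), g=np.array([[1.0]]),
+                               dV_dq=lambda q: 5.0 * np.sin(q),
+                               Kp=np.eye(1), Kd=3.0 * np.eye(1))
+ print(u)  # a torque pushing the configuration toward q*
+ ```
+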
121
+ # 3 SYMPLECTIC ODE-NET
122
+
123
+ In this section, we introduce the network architecture of Symplectic ODE-Net. In Subsection 3.1, we show how to learn an ordinary differential equation with a constant control term. In Subsection 3.2, we assume we have access to generalized coordinate and momentum data and derive the network architecture. In Subsection 3.3, we take one step further to propose a data-driven approach to deal with data of embedded angle coordinates. In Subsection 3.4, we put together the line of reasoning introduced in the previous two subsections to propose SymODEN for learning dynamics on the hybrid space $\mathbb{R}^n\times \mathbb{T}^m$ .
124
+
125
+ # 3.1 TRAINING NEURAL ODE WITH CONSTANT FORCING
126
+
127
+ Now we focus on the problem of learning the ordinary differential equation (ODE) from time series data. Consider an ODE: $\dot{\mathbf{x}} = \mathbf{f}(\mathbf{x})$ . Assume we don't know the analytical expression of the right hand side (RHS) and we approximate it with a neural network. If we have time series data $\mathbf{X} = (\mathbf{x}_{t_0}, \mathbf{x}_{t_1}, \dots, \mathbf{x}_{t_n})$ , how could we learn $\mathbf{f}(\mathbf{x})$ from the data?
128
+
129
+ Chen et al. (2018) introduced Neural ODE, differentiable ODE solvers with O(1)-memory backpropagation. With Neural ODE, we make predictions by approximating the RHS function using a neural network $\mathbf{f}_{\theta}$ and feed it into an ODE solver
130
+
131
+ $$
132
+ \hat {\mathbf {x}} _ {t _ {1}}, \hat {\mathbf {x}} _ {t _ {2}}, \dots , \hat {\mathbf {x}} _ {t _ {n}} = \operatorname {O D E S o l v e} \left(\mathbf {x} _ {t _ {0}}, \mathbf {f} _ {\theta}, t _ {1}, t _ {2}, \dots , t _ {n}\right)
133
+ $$
134
+
135
+ We can then construct the loss function $L = \| \mathbf{X} - \hat{\mathbf{X}}\| _2^2$ and update the weights $\theta$ by backpropagating through the ODE solver.
136
+
137
+ In theory, we can learn $\mathbf{f}_{\theta}$ in this way. In practice, however, the neural net is hard to train if $n$ is large. If we have a bad initial estimate of $\mathbf{f}_{\theta}$ , the prediction error would in general be large. Although $|\mathbf{x}_{t_1} - \hat{\mathbf{x}}_{t_1}|$ might be small, $\hat{\mathbf{x}}_{t_N}$ would be far from $\mathbf{x}_{t_N}$ as errors accumulate, which makes the neural network hard to train. In fact, the prediction error of $\hat{\mathbf{x}}_{t_N}$ is not as important as that of $\hat{\mathbf{x}}_{t_1}$ . In other words, we should weight data points in a short time horizon more than the rest of the data points. In order
138
+
139
+ to address this and better utilize the data, we introduce the time horizon $\tau$ as a hyperparameter and predict $\mathbf{x}_{t_{i + 1}},\mathbf{x}_{t_{i + 2}},\dots,\mathbf{x}_{t_{i + \tau}}$ from the initial condition $\mathbf{x}_{t_i}$ , where $i = 0,\ldots ,n - \tau$ .
140
+
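+ A minimal sketch (ours) of this windowing scheme, which turns one length- $n$ trajectory into many $\tau$ -step prediction problems:
+
+ ```python
+ import numpy as np
+
+ def make_training_windows(X, tau):
+     """X: (n + 1, state_dim) trajectory -> initial states and tau-step targets."""
+     n = X.shape[0] - 1
+     inits = np.stack([X[i] for i in range(n - tau + 1)])                   # x_{t_i}
+     targets = np.stack([X[i + 1 : i + 1 + tau] for i in range(n - tau + 1)])
+     return inits, targets   # targets[i] = (x_{t_{i+1}}, ..., x_{t_{i+tau}})
+
+ X = np.linspace(0.0, 1.0, 21).reshape(21, 1)   # a dummy 20-step trajectory
+ inits, targets = make_training_windows(X, tau=3)
+ print(inits.shape, targets.shape)              # (18, 1) (18, 3, 1)
+ ```
+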
141
+ One challenge toward leveraging Neural ODE to learn state-space models is the incorporation of the control term into the dynamics. Equation (4) has the form $\dot{\mathbf{x}} = \mathbf{f}(\mathbf{x},\mathbf{u})$ with $\mathbf{x} = (\mathbf{q},\mathbf{p})$ . A function of this form cannot be directly fed into Neural ODE since the domain and range of $\mathbf{f}$ have different dimensions. In general, if our data consist of trajectories of $(\mathbf{x},\mathbf{u})_{t_0,\dots,t_n}$ where $\mathbf{u}$ remains the same within a trajectory, we can leverage the augmented dynamics
142
+
143
+ $$
144
+ \left[ \begin{array}{c} \dot {\mathbf {x}} \\ \dot {\mathbf {u}} \end{array} \right] = \left[ \begin{array}{c} \mathbf {f} _ {\theta} (\mathbf {x}, \mathbf {u}) \\ \mathbf {0} \end{array} \right] = \tilde {\mathbf {f}} _ {\theta} (\mathbf {x}, \mathbf {u}). \tag {11}
145
+ $$
146
+
147
+ With Equation (11), we can match the input and output dimension of $\tilde{\mathbf{f}}_{\theta}$ , which enables us to feed it into Neural ODE. The idea here is to use different constant external forcing to get the system responses and use those responses to train the model. With a trained model, we can apply a time-varying $\mathbf{u}$ to the dynamics $\dot{\mathbf{x}} = \mathbf{f}_{\theta}(\mathbf{x},\mathbf{u})$ and generate estimated trajectories. When we synthesize the controller, $\mathbf{u}$ remains constant in each integration step. As long as our model interpolates well among different values of constant $\mathbf{u}$ , we could get good estimated trajectories with a time-varying $\mathbf{u}$ . The problem is then how to design the network architecture of $\tilde{\mathbf{f}}_{\theta}$ , or equivalently $\mathbf{f}_{\theta}$ such that we can learn the dynamics in an efficient way.
148
+
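+ A sketch of this augmentation in code (ours; we assume a PyTorch module `f_theta` mapping $(\mathbf{x},\mathbf{u})$ to $\dot{\mathbf{x}}$ and a differentiable solver such as `odeint` from the torchdiffeq package of Chen et al. (2018)):
+
+ ```python
+ import torch
+
+ class AugmentedDynamics(torch.nn.Module):
+     """Equation (11): stack x and u, and declare du/dt = 0."""
+     def __init__(self, f_theta, x_dim):
+         super().__init__()
+         self.f_theta, self.x_dim = f_theta, x_dim
+
+     def forward(self, t, xu):
+         x, u = xu[..., : self.x_dim], xu[..., self.x_dim :]
+         dx = self.f_theta(x, u)        # learned vector field f_theta(x, u)
+         du = torch.zeros_like(u)       # u is constant along each trajectory
+         return torch.cat([dx, du], dim=-1)
+
+ # Usage with a differentiable solver, e.g.:
+ #   x_hat = odeint(AugmentedDynamics(f_theta, x_dim), xu0, t_eval)
+ ```
+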
149
+ # 3.2 LEARNING FROM GENERALIZED COORDINATE AND MOMENTUM
150
+
151
+ Suppose we have trajectory data consisting of $(\mathbf{q},\mathbf{p},\mathbf{u})_{t_0,\dots ,t_n}$ , where $\mathbf{u}$ remains constant in a trajectory. If we have the prior knowledge that the unforced dynamics of $\mathbf{q}$ and $\mathbf{p}$ is governed by Hamiltonian dynamics, we can use three neural nets - $\mathbf{M}_{\theta_1}^{-1}(\mathbf{q})$ , $V_{\theta_2}(\mathbf{q})$ and $\mathbf{g}_{\theta_3}(\mathbf{q})$ - as function approximators to represent the inverse of mass matrix, potential energy and the input matrix. Thus,
152
+
153
+ $$
154
+ \mathbf {f} _ {\theta} (\mathbf {q}, \mathbf {p}, \mathbf {u}) = \left[ \begin{array}{c} \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {p}} \\ - \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {q}} \end{array} \right] + \left[ \begin{array}{c} \mathbf {0} \\ \mathbf {g} _ {\theta_ {3}} (\mathbf {q}) \end{array} \right] \mathbf {u} \tag {12}
155
+ $$
156
+
157
+ where
158
+
159
+ $$
160
+ H _ {\theta_ {1}, \theta_ {2}} (\mathbf {q}, \mathbf {p}) = \frac {1}{2} \mathbf {p} ^ {T} \mathbf {M} _ {\theta_ {1}} ^ {- 1} (\mathbf {q}) \mathbf {p} + V _ {\theta_ {2}} (\mathbf {q}) \tag {13}
161
+ $$
162
+
163
+ The partial derivatives in this expression can be taken care of by automatic differentiation. By putting the designed $\mathbf{f}_{\theta}(\mathbf{q},\mathbf{p},\mathbf{u})$ into Neural ODE, we obtain a systematic way of adding the prior knowledge of Hamiltonian dynamics into end-to-end learning.
164
+
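+ The following is a simplified sketch (ours, with scalar $q$ and $p$ and arbitrary network sizes) of Equations (12)-(13): three small networks parametrize $M^{-1}$ , $V$ and $g$ , the Hamiltonian is assembled from them, and its partial derivatives are obtained by automatic differentiation.
+
+ ```python
+ import torch
+
+ M_inv = torch.nn.Sequential(torch.nn.Linear(1, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1))
+ V     = torch.nn.Sequential(torch.nn.Linear(1, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1))
+ g_net = torch.nn.Sequential(torch.nn.Linear(1, 32), torch.nn.Tanh(), torch.nn.Linear(32, 1))
+
+ def f_theta(q, p, u):
+     q, p = q.requires_grad_(True), p.requires_grad_(True)
+     H = 0.5 * p * M_inv(q) * p + V(q)                       # Equation (13), scalar case
+     dH_dq, dH_dp = torch.autograd.grad(H.sum(), (q, p), create_graph=True)
+     return dH_dp, -dH_dq + g_net(q) * u                     # Equation (12)
+
+ q_dot, p_dot = f_theta(torch.randn(4, 1), torch.randn(4, 1), torch.zeros(4, 1))
+ print(q_dot.shape, p_dot.shape)                             # (4, 1) (4, 1)
+ ```
+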
165
+ # 3.3 LEARNING FROM EMBEDDED ANGLE DATA
166
+
167
+ In the previous subsection, we assumed the data $(\mathbf{q},\mathbf{p},\mathbf{u})_{t_0,\dots ,t_n}$ . In many physical system models, the state variables involve angles which reside in the interval $[- \pi ,\pi)$ . In other words, each angle resides on the manifold $\mathbb{S}^1$ . From a data-driven perspective, the data that respects the geometry is a two-dimensional embedding $(\cos q,\sin q)$ . Furthermore, the generalized momentum data is usually not available; instead, the velocity is often available. For example, in the OpenAI Gym (Brockman et al., 2016) Pendulum-v0 task, the observation is $(\cos q,\sin q,\dot{q})$ .
168
+
169
+ From a theoretical perspective, however, the angle itself is often used instead of the 2D embedding, because both the Lagrangian and the Hamiltonian formulations are derived using generalized coordinates. Using an independent generalized coordinate system makes it easier to solve for the equations of motion.
170
+
171
+ In this subsection, we take the data-driven standpoint and develop an angle-aware method to accommodate the underlying manifold structure. We assume all the generalized coordinates are angles and the data comes in the form of $(\mathbf{x}_1(\mathbf{q}),\mathbf{x}_2(\mathbf{q}),\mathbf{x}_3(\dot{\mathbf{q}}),\mathbf{u})_{t_0,\dots,t_n} = (\cos \mathbf{q},\sin \mathbf{q},\dot{\mathbf{q}},\mathbf{u})_{t_0,\dots,t_n}$ . We aim to incorporate our theoretical prior - Hamiltonian dynamics - into the data-driven approach. The goal is to learn the dynamics of $\mathbf{x}_1$ , $\mathbf{x}_2$ and $\mathbf{x}_3$ . Noticing $\mathbf{p} = \mathbf{M}(\mathbf{x}_1,\mathbf{x}_2)\dot{\mathbf{q}}$ , we can write down the derivatives of $\mathbf{x}_1$ , $\mathbf{x}_2$ and $\mathbf{x}_3$ ,
172
+
173
+ $$
174
+ \dot {\mathbf {x}} _ {1} = - \sin \mathbf {q} \circ \dot {\mathbf {q}} = - \mathbf {x} _ {2} \circ \dot {\mathbf {q}}
175
+ $$
176
+
177
+ $$
178
+ \dot {\mathbf {x}} _ {2} = \cos \mathbf {q} \circ \dot {\mathbf {q}} = \mathbf {x} _ {1} \circ \dot {\mathbf {q}} \tag {14}
179
+ $$
180
+
181
+ $$
182
+ \dot {\mathbf {x}} _ {3} = \frac {\mathrm {d}}{\mathrm {d} t} (\mathbf {M} ^ {- 1} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \mathbf {p}) = \frac {\mathrm {d}}{\mathrm {d} t} (\mathbf {M} ^ {- 1} (\mathbf {x} _ {1}, \mathbf {x} _ {2})) \mathbf {p} + \mathbf {M} ^ {- 1} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \dot {\mathbf {p}}
183
+ $$
184
+
185
+ where “ $\circ$ ” represents the elementwise product (i.e., Hadamard product). We assume $\mathbf{q}$ and $\mathbf{p}$ evolve with the generalized Hamiltonian dynamics Equation (4). Here the Hamiltonian $H(\mathbf{x}_1, \mathbf{x}_2, \mathbf{p})$ is a function of $\mathbf{x}_1$ , $\mathbf{x}_2$ and $\mathbf{p}$ instead of $\mathbf{q}$ and $\mathbf{p}$ .
186
+
187
+ $$
188
+ \dot {\mathbf {q}} = \frac {\partial H}{\partial \mathbf {p}} \tag {15}
189
+ $$
190
+
191
+ $$
192
+ \begin{array}{l} \dot {\mathbf {p}} = - \frac {\partial H}{\partial \mathbf {q}} + \mathbf {g} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \mathbf {u} = - \frac {\partial \mathbf {x} _ {1}}{\partial \mathbf {q}} \frac {\partial H}{\partial \mathbf {x} _ {1}} - \frac {\partial \mathbf {x} _ {2}}{\partial \mathbf {q}} \frac {\partial H}{\partial \mathbf {x} _ {2}} + \mathbf {g} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \mathbf {u} \\ = \sin \mathbf {q} \circ \frac {\partial H}{\partial \mathbf {x} _ {1}} - \cos \mathbf {q} \circ \frac {\partial H}{\partial \mathbf {x} _ {2}} + \mathbf {g} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \mathbf {u} = \mathbf {x} _ {2} \circ \frac {\partial H}{\partial \mathbf {x} _ {1}} - \mathbf {x} _ {1} \circ \frac {\partial H}{\partial \mathbf {x} _ {2}} + \mathbf {g} (\mathbf {x} _ {1}, \mathbf {x} _ {2}) \mathbf {u} \tag {16} \\ \end{array}
193
+ $$
194
+
195
+ Then the right hand side of Equation (14) can be expressed as a function of the state variables and control $(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3,\mathbf{u})$ . Thus, it can be fed into the Neural ODE. We use three neural nets - $\mathbf{M}_{\theta_1}^{-1}(\mathbf{x}_1,\mathbf{x}_2)$ , $V_{\theta_2}(\mathbf{x}_1,\mathbf{x}_2)$ and $\mathbf{g}_{\theta_3}(\mathbf{x}_1,\mathbf{x}_2)$ - as function approximators. Substituting Equation (15) and Equation (16) into Equation (14), the RHS serves as $\mathbf{f}_{\theta}(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3,\mathbf{u})$ .
196
+
197
+ $$
198
+ \mathbf {f} _ {\theta} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}, \mathbf {u}\right) = \left[ \begin{array}{c} - \mathbf {x} _ {2} \circ \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {p}} \\ \mathbf {x} _ {1} \circ \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {p}} \\ \frac {\mathrm {d}}{\mathrm {d} t} \left(\mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right)\right) \mathbf {p} + \mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right) \left(\mathbf {x} _ {2} \circ \frac {\partial H _ {\theta_ {1} \theta_ {2}}}{\partial \mathbf {x} _ {1}} - \mathbf {x} _ {1} \circ \frac {\partial H _ {\theta_ {1} \theta_ {2}}}{\partial \mathbf {x} _ {2}} + \mathbf {g} _ {\theta_ {3}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right) \mathbf {u}\right) \end{array} \right] \tag {17}
199
+ $$
200
+
201
+ where
202
+
203
+ $$
204
+ H _ {\theta_ {1}, \theta_ {2}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {p}\right) = \frac {1}{2} \mathbf {p} ^ {T} \mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right) \mathbf {p} + V _ {\theta_ {2}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right) \tag {18}
205
+ $$
206
+
207
+ $$
208
+ \mathbf {p} = \mathbf {M} _ {\theta_ {1}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}\right) \mathbf {x} _ {3} \tag {19}
209
+ $$
210
+
211
+ # 3.4 LEARNING ON HYBRID SPACES $\mathbb{R}^n\times \mathbb{T}^m$
212
+
213
+ In Subsection 3.2, we treated the generalized coordinates as translational coordinates. In Subsection 3.3, we developed an angle-aware method to better deal with embedded angle data. In most physical systems, these two types of coordinates coexist. For example, robotic systems are usually modelled as interconnected rigid bodies. The positions of joints or centers of mass are translational coordinates and the orientations of each rigid body are angular coordinates. In other words, the generalized coordinates lie on $\mathbb{R}^n\times \mathbb{T}^m$ , where $\mathbb{T}^m$ denotes the $m$ -torus, with $\mathbb{T}^1 = \mathbb{S}^1$ and $\mathbb{T}^2 = \mathbb{S}^1\times \mathbb{S}^1$ . In this subsection, we put together the architectures of the previous two subsections. We assume the generalized coordinates are $\mathbf{q} = (\mathbf{r},\boldsymbol {\phi})\in \mathbb{R}^n\times \mathbb{T}^m$ and the data comes in the form of $(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3,\mathbf{x}_4,\mathbf{x}_5,\mathbf{u})_{t_0,\dots ,t_n} = (\mathbf{r},\cos \phi ,\sin \phi ,\dot{\mathbf{r}},\dot{\boldsymbol{\phi}},\mathbf{u})_{t_0,\dots ,t_n}$ . With a similar line of reasoning, we use three neural nets - $\mathbf{M}_{\theta_1}^{-1}(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3)$ , $V_{\theta_2}(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3)$ and $\mathbf{g}_{\theta_3}(\mathbf{x}_1,\mathbf{x}_2,\mathbf{x}_3)$ - as function approximators. We have
214
+
215
+ $$
216
+ \mathbf {p} = \mathbf {M} _ {\theta_ {1}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}\right) \left[ \begin{array}{l} \mathbf {x} _ {4} \\ \mathbf {x} _ {5} \end{array} \right] \tag {20}
217
+ $$
218
+
219
+ $$
220
+ H _ {\theta_ {1}, \theta_ {2}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}, \mathbf {p}\right) = \frac {1}{2} \mathbf {p} ^ {T} \mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}\right) \mathbf {p} + V _ {\theta_ {2}} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}\right) \tag {21}
221
+ $$
222
+
223
+ with Hamiltonian dynamics, we have
224
+
225
+ $$
226
+ \dot {\mathbf {q}} = \left[ \begin{array}{l} \dot {\mathbf {r}} \\ \dot {\boldsymbol {\phi}} \end{array} \right] = \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {p}} \tag {22}
227
+ $$
228
+
229
+ $$
230
+ \dot {\mathbf {p}} = \left[ \begin{array}{c} - \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {x} _ {1}} \\ \mathbf {x} _ {3} \circ \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {x} _ {2}} - \mathbf {x} _ {2} \circ \frac {\partial H _ {\theta_ {1} , \theta_ {2}}}{\partial \mathbf {x} _ {3}} \end{array} \right] + \mathbf {g} _ {\theta_ {3}} (\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}) \mathbf {u} \tag {23}
231
+ $$
232
+
233
+ Then
234
+
235
+ $$
236
+ \left[ \begin{array}{l} \dot {\mathbf {x}} _ {1} \\ \dot {\mathbf {x}} _ {2} \\ \dot {\mathbf {x}} _ {3} \\ \dot {\mathbf {x}} _ {4} \\ \dot {\mathbf {x}} _ {5} \end{array} \right] = \left[ \begin{array}{c} \dot {\mathbf {r}} \\ - \mathbf {x} _ {3} \circ \dot {\boldsymbol {\phi}} \\ \mathbf {x} _ {2} \circ \dot {\boldsymbol {\phi}} \\ \frac {\mathrm {d}}{\mathrm {d} t} \left(\mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}\right)\right) \mathbf {p} + \mathbf {M} _ {\theta_ {1}} ^ {- 1} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}\right) \dot {\mathbf {p}} \end{array} \right] = \mathbf {f} _ {\theta} \left(\mathbf {x} _ {1}, \mathbf {x} _ {2}, \mathbf {x} _ {3}, \mathbf {x} _ {4}, \mathbf {x} _ {5}, \mathbf {u}\right) \tag {24}
237
+ $$
238
+
239
+ where $\dot{\mathbf{r}}$ and $\dot{\boldsymbol{\phi}}$ come from Equation (22). We now obtain an $\mathbf{f}_{\theta}$ which can be fed into Neural ODE. Figure 1 shows the flow of the computation graph based on Equations (20)-(24).
240
+
241
+ ![](images/89dc3b6724eaca76a2c9eeec8e018c8093b5c5ce1428dac936106f3bdc008b5a.jpg)
242
+ Figure 1: The computation graph of SymODEN. Blue arrows indicate neural network parametrization. Red arrows indicate automatic differentiation. For a given $(\mathbf{x},\mathbf{u})$ , the computation graph outputs $\mathbf{f}_{\theta}(\mathbf{x},\mathbf{u})$ , which follows Hamiltonian dynamics with control. The function itself is an input to the Neural ODE to generate state estimates at each time step. Since all the operations are differentiable, the weights of the neural networks can be updated by backpropagation.
243
+
244
+ # 3.5 POSITIVE DEFINITENESS OF THE MASS MATRIX
245
+
246
+ In real physical systems, the mass matrix $\mathbf{M}$ is positive definite, which ensures positive kinetic energy for any non-zero velocity. The positive definiteness of $\mathbf{M}$ implies the positive definiteness of $\mathbf{M}_{\theta_1}^{-1}$ . Thus, we impose this constraint in the network architecture by $\mathbf{M}_{\theta_1}^{-1} = \mathbf{L}_{\theta_1}\mathbf{L}_{\theta_1}^T$ , where $\mathbf{L}_{\theta_1}$ is a lower-triangular matrix. Since $\mathbf{L}_{\theta_1}\mathbf{L}_{\theta_1}^T$ is guaranteed to be positive semidefinite, in practice we ensure strict positive definiteness by adding a small constant $\epsilon$ to the diagonal elements of $\mathbf{M}_{\theta_1}^{-1}$ . This not only makes $\mathbf{M}_{\theta_1}$ invertible, but also stabilizes the training.
247
+
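+ A sketch (ours; the tensor shapes and $\epsilon$ are illustrative) of this parametrization: a network output fills the lower triangle of $\mathbf{L}$ , and $\mathbf{L}\mathbf{L}^T + \epsilon \mathbf{I}$ is symmetric positive definite by construction.
+
+ ```python
+ import torch
+
+ def make_M_inv(raw, n, eps=1e-2):
+     """raw: (batch, n (n + 1) / 2) network output -> (batch, n, n) SPD matrix."""
+     L = torch.zeros(raw.shape[0], n, n)
+     rows, cols = torch.tril_indices(n, n)
+     L[:, rows, cols] = raw                  # fill the lower triangle of L
+     M_inv = L @ L.transpose(-1, -2)         # L L^T is positive semidefinite
+     return M_inv + eps * torch.eye(n)       # epsilon shift makes it positive definite
+
+ M_inv = make_M_inv(torch.randn(8, 3), n=2)  # n = 2 has 3 lower-triangular entries
+ print(torch.linalg.eigvalsh(M_inv).min())   # smallest eigenvalue is > 0
+ ```
+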
248
+ # 4 EXPERIMENTS
249
+
250
+ # 4.1 EXPERIMENTAL SETUP
251
+
252
+ We use the following four tasks to evaluate the performance of Symplectic ODE-Net model - (i) Task 1: a pendulum with generalized coordinate and momentum data (learning on $\mathbb{R}^1$ ); (ii) Task 2: a pendulum with embedded angle data (learning on $\mathbb{S}^1$ ); (iii) Task 3: a CartPole system (learning on $\mathbb{R}^1 \times \mathbb{S}^1$ ); and (iv) Task 4: an Acrobot (learning on $\mathbb{T}^2$ ).
253
+
254
+ Model Variants. Besides the Symplectic ODE-Net model derived above, we consider a variant by approximating the Hamiltonian using a fully connected neural net $H_{\theta_1,\theta_2}$ . We call it Unstructured Symplectic ODE-Net (Unstructured SymODEN) since this model does not exploit the structure of the Hamiltonian (1).
255
+
256
+ Baseline Models. In order to show that we can learn the dynamics better with fewer parameters by leveraging prior knowledge, we set up baseline models for all four experiments. For the pendulum with generalized coordinate and momentum data, the naive baseline model approximates Equation (12) - $\mathbf{f}_{\theta}(\mathbf{x},\mathbf{u})$ - by a fully connected neural net. All the other experiments involve embedded angle data, so for them we set up two different baseline models: the naive baseline approximates $\mathbf{f}_{\theta}(\mathbf{x},\mathbf{u})$ by a fully connected neural net, which does not respect the fact that the coordinate pairs $(\cos \phi, \sin \phi)$ lie on $\mathbb{T}^m$ . Thus, we also set up the geometric baseline model, which approximates $\dot{q}$ and $\dot{p}$ with a fully connected neural net; this ensures that the angle data evolves on $\mathbb{T}^m$ .
257
+
258
+ Data Generation. For all tasks, we randomly generated initial conditions of states and subsequently combined them with 5 different constant control inputs, i.e., $u = -2.0, -1.0, 0.0, 1.0, 2.0$ to produce the initial conditions and input required for simulation. The simulators integrate the corresponding dynamics for 20 time steps to generate trajectory data which is then used to construct the training set. The simulators for different tasks are different. For Task 1, we integrate the true generalized Hamiltonian dynamics with a time interval of 0.05 seconds to generate trajectories. All the other tasks deal with embedded angle data and velocity directly, so we use OpenAI Gym (Brockman et al., 2016) simulators to generate trajectory data. One drawback of using OpenAI Gym is that not all environments use the Runge-Kutta method (RK4) to carry out the integration. OpenAI Gym favors other numerical schemes over RK4 because of speed, but it is harder to learn the dynamics with
259
+
260
+ inaccurate data. For example, if we plot the total energy as a function of time from data generated by the Pendulum-v0 environment with zero action, we see that the total energy oscillates around a constant by a significant amount, even though the total energy should be conserved. Thus, for Task 2 and Task 3, we use Pendulum-v0 and CartPole-v1, respectively, and replace the numerical integrator of the environments with RK4. For Task 4, we use the Acrobot-v1 environment, which already uses RK4. We also change the action space of Pendulum-v0, CartPole-v1 and Acrobot-v1 to a continuous space with a large enough bound.
261
+
262
+ Model training. In all the tasks, we train our model using Adam optimizer (Kingma & Ba, 2014) with 1000 epochs. We set a time horizon $\tau = 3$ , and choose "RK4" as the numerical integration scheme in Neural ODE. We vary the size of the training set by doubling from 16 initial state conditions to 1024 initial state conditions. Each initial state condition is combined with five constant control $u = -2.0, -1.0, 0.0, 1.0, 2.0$ to produce initial condition for simulation. Each trajectory is generated by integrating the dynamics 20 time steps forward. We set the size of mini-batches to be the number of initial state conditions. We logged the train error per trajectory and the prediction error per trajectory in each case for all the tasks. The train error per trajectory is the mean squared error (MSE) between the estimated trajectory and the ground truth over 20 time steps. To evaluate the performance of each model in terms of long time prediction, we construct the metric of prediction error per trajectory by using the same initial state condition in the training set with a constant control of $u = 0.0$ , integrating 40 time steps forward, and calculating the MSE over 40 time steps. The reason for using only the unforced trajectories is that a constant nonzero control might cause the velocity to keep increasing or decreasing over time, and large absolute values of velocity are of little interest for synthesizing controllers.
263
+
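+ A schematic training step (our simplification; `f_aug` is a stand-in augmented vector field in the sense of Equation (11), the ground-truth tensor is a random placeholder, and shapes follow the setup above with $\tau = 3$ and a 0.05 s step), assuming the torchdiffeq `odeint` used by Neural ODE:
+
+ ```python
+ import torch
+ from torchdiffeq import odeint   # differentiable ODE solvers (Chen et al., 2018)
+
+ class FAug(torch.nn.Module):     # stand-in for the augmented dynamics of Equation (11)
+     def __init__(self, dim=3):   # e.g. (q, p, u) for Task 1
+         super().__init__()
+         self.net = torch.nn.Sequential(torch.nn.Linear(dim, 64), torch.nn.Tanh(),
+                                        torch.nn.Linear(64, dim - 1))
+     def forward(self, t, xu):
+         dx = self.net(xu)                        # d(state)/dt
+         du = torch.zeros_like(xu[..., -1:])      # constant control: du/dt = 0
+         return torch.cat([dx, du], dim=-1)
+
+ f_aug = FAug()
+ opt = torch.optim.Adam(f_aug.parameters(), lr=1e-3)
+ xu0 = torch.randn(16, 3)                         # a mini-batch of initial (q, p, u)
+ t = torch.linspace(0.0, 0.15, 4)                 # tau = 3 steps of 0.05 s
+ X_true = torch.randn(3, 16, 3)                   # ground-truth states (placeholder)
+ X_hat = odeint(f_aug, xu0, t, method='rk4')      # (tau + 1, batch, dim) predictions
+ loss = torch.mean((X_hat[1:] - X_true) ** 2)     # train error: MSE over the horizon
+ opt.zero_grad(); loss.backward(); opt.step()
+ ```
+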
264
+ # 4.2 TASK 1: PENDULUM WITH GENERALIZED COORDINATE AND MOMENTUM DATA
265
+
266
+ In this task, we use the model described in Section 3.2 and present the predicted trajectories of the learned models as well as the learned functions of SymODEN. We also point out the drawback of treating the angle data as a Cartesian coordinate. The dynamics of this task has the following form
267
+
268
+ $$
269
+ \dot {q} = 3 p, \quad \dot {p} = - 5 \sin q + u \tag {25}
270
+ $$
271
+
272
+ with Hamiltonian $H(q,p) = 1.5p^2 + 5(1 - \cos q)$ . In other words, $M^{-1}(q) = 3$ , $V(q) = 5(1 - \cos q)$ and $g(q) = 1$ .
273
+
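+ A small sketch (ours) of generating Task 1 training trajectories by integrating Equation (25) under each constant control, mirroring the data-generation setup above:
+
+ ```python
+ import numpy as np
+ from scipy.integrate import solve_ivp
+
+ def pendulum(t, x, u):
+     q, p = x
+     return [3.0 * p, -5.0 * np.sin(q) + u]    # Equation (25)
+
+ t_eval = np.arange(20) * 0.05                 # 20 time steps, 0.05 s apart
+ trajectories = []
+ for u in (-2.0, -1.0, 0.0, 1.0, 2.0):         # the five constant controls
+     sol = solve_ivp(pendulum, (0.0, t_eval[-1]), y0=[1.0, 0.0],
+                     t_eval=t_eval, args=(u,), rtol=1e-8)
+     trajectories.append(sol.y.T)              # one (20, 2) array of (q, p) per control
+ print(np.stack(trajectories).shape)           # (5, 20, 2)
+ ```
+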
274
+ ![](images/2dd9345c540f267bd756ddeb4669ee6cdd72718d93e1a32ab7e770c14f753ddf.jpg)
275
+ Figure 2: Sample trajectories and learned functions of Task 1.
276
+
277
+ In Figure 2, the ground truth is an unforced trajectory, which is energy-conserving. The predicted trajectory of the baseline model does not conserve energy, while both SymODEN and its unstructured variant predict energy-conserving trajectories. For SymODEN, the learned $g_{\theta_3}(q)$ and $M_{\theta_1}^{-1}(q)$ match the ground truth well. $V_{\theta_2}(q)$ differs from the ground truth by a constant. This is acceptable since potential energy is only defined up to a constant; only the derivative of $V_{\theta_2}(q)$ plays a role in the dynamics.
278
+
279
+ Here we treat $q$ as a variable in $\mathbb{R}^1$ and our training set contains initial conditions of $q \in [-\pi, 3\pi]$ . The learned functions do not extrapolate well outside this range, as we can see from the left part in the figures of $M_{\theta_1}^{-1}(q)$ and $V_{\theta_2}(q)$ . We address this issue by working directly with embedded angle data, which leads us to the next subsection.
280
+
281
+ # 4.3 TASK 2: PENDULUM WITH EMBEDDED DATA
282
+
283
+ In this task, the dynamics is the same as Equation (25) but the training data are generated by the OpenAI Gym simulator, i.e. we use embedded angle data and assume we only have access to $\dot{q}$ instead of $p$ . We use the model described in Section 3.3 and synthesize an energy-based controller (Section 2.2). Without true $p$ data, the learned function matches the ground truth with a scaling $\beta$ , as shown in Figure 3. To explain the scaling, let us look at the following dynamics
284
+
285
+ $$
286
+ \dot {q} = p / \alpha , \quad \dot {p} = - 1 5 \alpha \sin q + 3 \alpha u \tag {26}
287
+ $$
288
+
289
+ with Hamiltonian $H = p^2 / (2\alpha) + 15\alpha (1 - \cos q)$ . If we only look at the dynamics of $q$ , we have $\ddot{q} = -15\sin q + 3u$ , which is independent of $\alpha$ . If we don't have access to the generalized momentum $p$ , our trained neural network may converge to a Hamiltonian with an $\alpha_e$ which is
290
+
291
+ ![](images/939ba1c5100230a216ad0061e076585cab27eea4c6843a866b50503bedb1397d.jpg)
292
+ Figure 3: Without true generalized momentum data, the learned functions match the ground truth with a scaling. Here $\beta = 0.357$
293
+
294
+ ![](images/3c5ab47d004772543060908efba051a690601ec287043d4fefd841f3c1afe916.jpg)
295
+
296
+ ![](images/9ae144079713470699cc270e596d7c385f3401730910dc0552f7eb22409b5c05.jpg)
297
+
298
+ different from the true value, $\alpha_{t} = 1 / 3$ , in this task. With a scaling $\beta = \alpha_{t} / \alpha_{e} = 0.357$ , the learned functions match the ground truth. Even though we are not learning the true $\alpha_{t}$ , we can still perform prediction and control since we are learning the dynamics of $q$ correctly. We let $V_{d} = -V_{\theta_{2}}(q)$ , so that the desired Hamiltonian has minimum energy when the pendulum rests at the upward position. For the damping injection, we let $K_{d} = 3$ . Then, from Equations (7) and (8), the controller we synthesize is
299
+
300
+ $$
301
+ u (\cos q, \sin q, \dot {q}) = g _ {\theta_ {3}} ^ {- 1} (\cos q, \sin q) \left(2 \left(- \frac {\partial V _ {\theta_ {2}}}{\partial \cos q} \sin q + \frac {\partial V _ {\theta_ {2}}}{\partial \sin q} \cos q\right) - 3 \dot {q}\right) \tag {27}
302
+ $$
303
+
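+ A sketch (ours; `dV_dcos`, `dV_dsin` and `g` stand in for the derivatives of the learned $V_{\theta_2}$ and for $g_{\theta_3}$ ) of Equation (27), where $\partial V / \partial q$ is recovered from the embedded coordinates by the chain rule and the factor 2 comes from the choice $V_d = -V_{\theta_2}$ :
+
+ ```python
+ def u_embedded(cos_q, sin_q, q_dot, dV_dcos, dV_dsin, g, Kd=3.0):
+     """Equation (27): energy shaping plus damping in embedded coordinates."""
+     dV_dq = -dV_dcos(cos_q, sin_q) * sin_q + dV_dsin(cos_q, sin_q) * cos_q
+     return (2.0 * dV_dq - Kd * q_dot) / g(cos_q, sin_q)
+
+ # Example with the true pendulum potential V = 5 (1 - cos q), so that
+ # dV/d(cos q) = -5, dV/d(sin q) = 0, and g = 1:
+ u = u_embedded(cos_q=1.0, sin_q=0.0, q_dot=0.5,
+                dV_dcos=lambda c, s: -5.0, dV_dsin=lambda c, s: 0.0,
+                g=lambda c, s: 1.0)
+ print(u)  # dV/dq vanishes at q = 0, so only damping acts: u = -1.5
+ ```
+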
304
+ Among all the models we consider, only SymODEN provides the learned potential energy, which is required to synthesize the controller. Figure 4 shows how the states evolve when the controller is fed into the OpenAI Gym simulator. We can successfully
305
+
306
+ ![](images/18a888a07b65c80d3e6ea2148862b87ca5a6430a0af1a6dbb5106319228f2eec.jpg)
307
+ Figure 4: Time-evolution of the state variables $(\cos q, \sin q, \dot{q})$ when the closed-loop control input $u(\cos q, \sin q, \dot{q})$ is governed by Equation (27). The thin black lines show the expected results.
308
+
309
+ ![](images/cd6ea3f6e10ccafc75af2263e508cd259d3ea899f4fd447d1f26b72e298a0794.jpg)
310
+
311
+ ![](images/5d40838601a957925d524b0bb125a2294b29d06b7a4347ac4a4b1e6d9b210453.jpg)
312
+
313
+ control the pendulum into the inverted position using the controller based on the learned model, even though the maximum absolute control, $|u| = 7.5$ , is more than three times larger than the maximum absolute control in the training set, which is 2.0. This shows that SymODEN extrapolates well.
314
+
315
+ # 4.4 TASK 3: CARTPOLE SYSTEM
316
+
317
+ The CartPole system is an underactuated system, and synthesizing a controller to balance the pole from an arbitrary initial condition requires trajectory optimization or kinetic energy shaping. We show that we can learn its dynamics and perform prediction in Section 4.6. We also train SymODEN on a fully-actuated version of the CartPole system (see Appendix E). The corresponding energy-based controller can bring the pole to the inverted position while driving the cart to the origin.
318
+
319
+ # 4.5 TASK 4: ACROBOT
320
+
321
+ The Acrobot is an underactuated double pendulum. As this system exhibits chaotic motion, it is not possible to predict its long-term behavior. However, Figure 6 shows that SymODEN can provide reasonably good short-term predictions. We also train SymODEN on a fully-actuated version of the Acrobot and show that we can control this system to reach the inverted position (see Appendix E).
322
+
323
+ # 4.6 RESULTS
324
+
325
+ In this subsection, we show the train error, prediction error, as well as the MSE and total energy of a sample test trajectory for all the tasks. Figure 5 shows the variation in train error and prediction error with changes in the number of initial state conditions in the training set. We can see that SymODEN yields better generalization in every task. In Task 3, although the Geometric Baseline Model yields lower train error in comparison to the other models, SymODEN generates more accurate predictions, indicating overfitting in the Geometric Baseline Model. By incorporating the physics-based prior of Hamiltonian dynamics, SymODEN learns dynamics that obeys physical laws and thus provides better predictions. In most cases, SymODEN trained with a smaller training dataset performs better than other models in terms of the train and prediction error, indicating that better generalization can be achieved even with fewer training samples.
326
+
327
+ Figure 6 shows the evolution of MSE and total energy along a trajectory with a previously unseen initial condition. For all the tasks, the MSE of the baseline models diverges faster than that of SymODEN. Unstructured SymODEN performs well in all tasks except Task 3. As for the total energy, in Task 1 and Task 2, SymODEN and Unstructured SymODEN conserve total energy by oscillating around a constant value. In these models, the Hamiltonian itself is learned, and the predicted future states
328
+
329
+ ![](images/f0689624e20f225afad3a3d0ec76102be6bfa9562f308f511f5f1e46f68e592e.jpg)
330
+
331
+ ![](images/ff9e94685947729de1fdab369a29c4180d9e1f3542e0863b45ea4827d07c0ee1.jpg)
332
+
333
+ ![](images/0ff339c4dd402471908a7dbc957953533fa63aaae18bd1062a342b4dc1ee7eef.jpg)
334
+
335
+ ![](images/35b596764b96f11e42d3dffd78f355d489f7d2326e4dc59983c89fad34396fd4.jpg)
336
+
337
+ ![](images/49bc4960fc3be9f826e9343f8ae4777057e32168ceec6a89dc98583cb56e6250.jpg)
338
+
339
+ ![](images/ed2c9c32b3b941a9d1b4e8467206ad15f35c1f0a28e4c791bf53961e0c016fab.jpg)
340
+
341
+ ![](images/aef7340ca6b6b85a544d8f7bcd0e35dc10ef969fb3cec5de44b51a0ce57977cf.jpg)
342
+
343
+ ![](images/786e8ac9b01f65f5ce8277b1821c009a9d363ccd56bf0c9f0b71aee0af043f74.jpg)
344
+
345
+ ![](images/39bf94ca1496a514484bab9e1f49409741f0a94d076040b1ce140ea45e937a8a.jpg)
346
+ Figure 5: Train error per trajectory and prediction error per trajectory for all 4 tasks with different number of training trajectories. Horizontal axis shows number of initial state conditions (16, 32, 64, 128, 256, 512, 1024) in the training set. Both the horizontal axis and vertical axis are in log scale.
347
+
348
+ ![](images/c5127869d58a7d7c1b3ac0e38f7ad2b463aec9d6b7caa852c1739a193a4334d4.jpg)
349
+
350
+ ![](images/8929aa245baa1e5ee35ce0cd06a80226d1ee36775a1d02de36ed425147f4eab0.jpg)
351
+
352
+ ![](images/366d5449ba540c64300f631e24da1d69e14378b31b176b0357f7fd10df04b879.jpg)
353
+
354
+ ![](images/be5d584c707d2089087ce894ffe427ab1b79467975f995e567792a2493436ece.jpg)
355
+ Figure 6: Mean square error and total energy of test trajectories. SymODEN works the best in terms of both MSE and total energy. Since SymODEN has learned the Hamiltonian and discovered the conservation law from data, the predicted trajectories match the ground truth. The ground-truth energy in all four tasks stays constant.
356
+
357
+ ![](images/b212adbd67fea1ad973a211a057c1cb02f5ad023262111c4cce0fb310ccae9a5.jpg)
358
+
359
+ ![](images/10d9d6077b31607a825087c4a9bcb6c3ee229e41ff037045956fe12539984e58.jpg)
360
+
361
+ ![](images/9304504d646488a2be16c682b0b558a3010295a63d5a9edd5f107f37e065ead9.jpg)
362
+
363
+ stay around a level set of the Hamiltonian. The baseline models, however, fail to discover this conservation law, and their estimates of future states drift away from the initial Hamiltonian level set.
364
+
365
+ # 5 CONCLUSION
366
+
367
+ Here we have introduced Symplectic ODE-Net, which provides a systematic way to incorporate prior knowledge of Hamiltonian dynamics with control into a deep learning framework. We show that SymODEN achieves better prediction with fewer training samples by learning an interpretable, physically-consistent state-space model. SymODEN can work with embedded angle data and with velocity data when generalized momentum is unavailable. Future work will incorporate a broader class of physics-based priors, such as the port-Hamiltonian system formulation, to learn the dynamics of a larger class of physical systems, and will explore other types of embedding, such as embedded 3D orientations. Another interesting direction is to combine energy shaping control (potential as well as kinetic energy shaping) with interpretable end-to-end learning frameworks.
368
+
369
+ # REFERENCES
370
+
371
+ Vladimir I. Arnold, Alexander B. Givental, and Sergei P. Novikov. Symplectic geometry. In Dynamical systems IV, pp. 1-138. Springer, 2001.
372
+
373
+ Ibrahim Ayed, Emmanuel de Bézenac, Arthur Pajot, Julien Brajard, and Patrick Gallinari. Learning dynamical systems from partial observations. arXiv:1902.11136, 2019.
374
+
375
+ Jonathan Baxter. A model of inductive bias learning. Journal of Artificial Intelligence Research, 12: 149-198, 2000.
376
+ Anthony M. Bloch, Naomi E. Leonard, and Jerrold E. Marsden. Controlled lagrangians and the stabilization of euler-poincaré mechanical systems. International Journal of Robust and Nonlinear Control, 11(3):191-214, 2001.
377
+ Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv:1606.01540, 2016.
378
+ Arunkumar Byravan and Dieter Fox. Se3-nets: Learning rigid body motion using deep neural networks. In 2017 IEEE International Conference on Robotics and Automation (ICRA), pp. 173-180. IEEE, 2017.
379
+ Dong E. Chang, Anthony M. Bloch, Naomi E. Leonard, Jerrold E. Marsden, and Craig A. Woolsey. The equivalence of controlled lagrangian and controlled hamiltonian systems. ESAIM: Control, Optimisation and Calculus of Variations, 8:393-422, 2002.
380
+ Tian Q. Chen, Yulia Rubanova, Jesse Bettencourt, and David K. Duvenaud. Neural ordinary differential equations. In Advances in Neural Information Processing Systems 31, pp. 6571-6583. 2018.
381
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, 2019.
382
+ Herbert Goldstein, Charles Poole, and John Safko. Classical mechanics, 2002.
383
+ Ian Goodfellow, Aaron Courville, and Yoshua Bengio. Deep learning, volume 1. MIT Press, 2016.
384
+ Sam Greydanus, Misko Dzamba, and Jason Yosinski. Hamiltonian Neural Networks. arXiv:1906.01563, 2019.
385
+ Jayesh K. Gupta, Kunal Menda, Zachary Manchester, and Mykel J. Kochenderfer. A general framework for structured learning of mechanical systems. arXiv:1902.08705, 2019.
386
+ David Haussler. Quantifying inductive bias: AI learning algorithms and Valiant's learning framework. Artificial Intelligence, 36(2):177-221, 1988.
387
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770-778, 2016.
388
+ Maximilian Karl, Maximilian Soelch, Justin Bayer, and Patrick van der Smagt. Deep variational bayes filters: Unsupervised learning of state space models from raw data. arXiv:1605.06432, 2016.
389
+ Diederik P. Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimization. arXiv:1412.6980, 2014.
390
+ Rahul G. Krishnan, Uri Shalit, and David Sontag. Structured inference networks for nonlinear state space models. In Thirty-First AAAI Conference on Artificial Intelligence, 2017.
391
+ Timothy P. Lillicrap, Jonathan J. Hunt, Alexander Pritzel, Nicolas Heess, Tom Erez, Yuval Tassa, David Silver, and Daan Wierstra. Continuous control with deep reinforcement learning. arXiv:1509.02971, 2015.
392
+ Michael Lutter, Christian Ritter, and Jan Peters. Deep lagrangian networks: Using physics as model prior for deep learning. In 7th International Conference on Learning Representations (ICLR), 2019.
393
+ Kumpati S. Narendra and Kannan Parthasarathy. Identification and control of dynamical systems using neural networks. IEEE Transactions on Neural Networks, 1(1):4-27, 1990.
394
+
395
+ Romeo Ortega, Arjan J. Van Der Schaft, Iven Mareels, and Bernhard Maschke. Putting energy back in control. IEEE Control Systems Magazine, 21(2):18-33, 2001.
396
+ Romeo Ortega, Arjan J. Van Der Schaft, Bernhard Maschke, and Gerardo Escobar. Interconnection and damping assignment passivity-based control of port-controlled Hamiltonian systems. Automatica, 38(4):585-596, 2002.
397
+ David J. Rowe, Arthur Ryman, and George Rosensteel. Many-body quantum mechanics as a symplectic dynamical system. Physical Review A, 22(6):2362, 1980.
398
+ Alvaro Sanchez-Gonzalez, Nicolas Heess, Jost T. Springenberg, Josh Merel, Martin Riedmiller, Raia Hadsell, and Peter Battaglia. Graph networks as learnable physics engines for inference and control. In International Conference on Machine Learning (ICML), pp. 4467-4476, 2018.
399
+ David Silver, Julian Schrittwieser, Karen Simonyan, Ioannis Antonoglou, Aja Huang, Arthur Guez, Thomas Hubert, Lucas Baker, Matthew Lai, Adrian Bolton, et al. Mastering the game of go without human knowledge. Nature, 550(7676):354, 2017.
400
+ Torsten Söderström and Petre Stoica. System identification. Prentice-Hall, Inc., 1988.
401
+ Manuel Watter, Jost Springenberg, Joschka Boedecker, and Martin Riedmiller. Embed to control: A locally linear latent dynamics model for control from raw images. In Advances in Neural Information Processing Systems 28, pp. 2746-2754, 2015.
402
+ Tianshu Wei, Yanzhi Wang, and Qi Zhu. Deep Reinforcement Learning for Building HVAC Control. In Proceedings of the 54th Annual Design Automation Conference (DAC), pp. 22:1-22:6, 2017.
403
+
404
+ # Appendices
405
+
406
+ # A EXPERIMENT IMPLEMENTATION DETAILS
407
+
408
+ The architectures used for our experiments are shown below. For all the tasks, SymODEN has the lowest number of total parameters. To ensure that the learned functions are smooth, we use the Tanh activation function instead of ReLU. As we have differentiation in the computation graph, non-smooth activation functions would lead to discontinuities in the derivatives. This, in turn, would result in an ODE with a discontinuous right-hand side, which is not desirable. All the architectures shown below are fully-connected neural networks. The first number indicates the dimension of the input layer, the last number indicates the dimension of the output layer, and the dimensions of the hidden layers are shown in the middle along with the activation functions. (A PyTorch-style sketch of this notation follows the Task 1 listing below.)
409
+
410
+ # Task 1: Pendulum
411
+
412
+ - Input: 2 state dimensions, 1 action dimension
413
+ - Baseline Model (0.36M parameters): 2 - 600Tanh - 600Tanh - 2Linear
414
+ - Unstructured SymODEN (0.20M parameters):
415
+ - $H_{\theta_1,\theta_2}$ : 2 - 400Tanh - 400Tanh - 1Linear
416
+ - $g_{\theta_3}$ : 1 - 200Tanh - 200Tanh - 1Linear
417
+
418
+ - SymODEN (0.13M parameters):
419
+
420
+ - $M_{\theta_1}^{-1}$ : 1 - 300Tanh - 300Tanh - 1Linear
421
+ - $V_{\theta_2}$ : 1 - 50Tanh - 50Tanh - 1Linear
422
+ - $g_{\theta_3}$ : 1 - 200Tanh - 200Tanh - 1Linear
423
+
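+ As an illustration of this notation, the Task 1 SymODEN components above can be written as a minimal PyTorch-style sketch (the helper `mlp` is hypothetical; layer sizes are taken from the listing):
+
+ ```python
+ import torch.nn as nn
+
+ def mlp(sizes):
+     # Fully-connected network: Tanh on hidden layers, linear output layer.
+     layers = []
+     for i in range(len(sizes) - 1):
+         layers.append(nn.Linear(sizes[i], sizes[i + 1]))
+         if i < len(sizes) - 2:
+             layers.append(nn.Tanh())
+     return nn.Sequential(*layers)
+
+ # Task 1 (Pendulum) SymODEN components, per the listing above:
+ M_inv = mlp([1, 300, 300, 1])  # M^{-1}_{theta_1}: 1 - 300Tanh - 300Tanh - 1Linear
+ V = mlp([1, 50, 50, 1])        # V_{theta_2}:      1 - 50Tanh - 50Tanh - 1Linear
+ g = mlp([1, 200, 200, 1])      # g_{theta_3}:      1 - 200Tanh - 200Tanh - 1Linear
+ ```
+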
424
+ # Task 2: Pendulum with embedded data
425
+
426
+ - Input: 3 state dimensions, 1 action dimension
427
+
428
+ - Naive Baseline Model (0.65M parameters): 4 - 800Tanh - 800Tanh - 3Linear
429
+
430
+ - Geometric Baseline Model (0.46M parameters):
431
+
432
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 2 - 300Tanh - 300Tanh - 300Tanh - 1Linear
433
+ - approximate $(\dot{q},\dot{p})$ : 4 - 600Tanh - 600Tanh - 2Linear
434
+
435
+ - Unstructured SymODEN (0.39M parameters):
436
+
437
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 2 - 300Tanh - 300Tanh - 300Tanh - 1Linear
438
+ - $H_{\theta_2}$ : 3 - 500Tanh - 500Tanh - 1Linear
439
+ - $g_{\theta_3}$ : 2 - 200Tanh - 200Tanh - 1Linear
440
+
441
+ - SymODEN (0.14M parameters):
442
+
443
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 2 - 300Tanh - 300Tanh - 300Tanh - 1Linear
444
+ - $V_{\theta_2}$ : 2 - 50Tanh - 50Tanh - 1Linear
445
+ - $g_{\theta_3}$ : 2 - 200Tanh - 200Tanh - 1Linear
446
+
447
+ # Task 3: CartPole
448
+
449
+ - Input: 5 state dimensions, 1 action dimension
450
+ - Naive Baseline Model (1.01M parameters): 6 - 1000Tanh - 1000Tanh - 5Linear
451
+
452
+ - Geometric Baseline Model (0.82M parameters):
453
+
454
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 3 - 400Tanh - 400Tanh - 400Tanh - 3Linear
455
+ - approximate $(\dot{\mathbf{q}},\dot{\mathbf{p}})$ : 6 - 700Tanh - 700Tanh - 4Linear
456
+
457
+ - Unstructured SymODEN (0.67M parameters):
458
+
459
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 3 - 400Tanh - 400Tanh - 400Tanh - 3Linear
460
+ - $H_{\theta_2}$ : 5 - 500Tanh - 500Tanh - 1Linear
461
+ - $g_{\theta_3}$ : 3 - 300Tanh - 300Tanh - 2Linear
462
+
463
+ - SymODEN (0.51M parameters):
464
+
465
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 3 - 400Tanh - 400Tanh - 400Tanh - 3Linear
466
+ - $V_{\theta_2}$ : 3 - 300Tanh - 300Tanh - 1Linear
467
+ - $g_{\theta_3}$ : 3 - 300Tanh - 300Tanh - 2Linear
468
+
469
+ # Task 4: Acrobot
470
+
471
+ - Input: 6 state dimensions, 1 action dimension
472
+ - Naive Baseline Model (1.46M parameters): 7 - 1200Tanh - 1200Tanh - 6Linear
473
+
474
+ - Geometric Baseline Model (0.97M parameters):
475
+
476
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}: 4 - 400\mathrm{Tanh} - 400\mathrm{Tanh} - 400\mathrm{Tanh} - 3\mathrm{Linear}$
477
+ - approximate $(\dot{\mathbf{q}},\dot{\mathbf{p}})$ : 7 - 800Tanh - 800Tanh - 4Linear
478
+
479
+ - Unstructured SymODEN (0.78M parameters):
480
+
481
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 4 - 400Tanh - 400Tanh - 400Tanh - 3Linear
482
+ - $H_{\theta_2}$ : 6 - 600Tanh - 600Tanh - 1Linear
483
+ - $g_{\theta_3}$ : 4 - 300Tanh - 300Tanh - 2Linear
484
+
485
+ - SymODEN (0.51M parameters):
486
+
487
+ - $M_{\theta_1}^{-1} = L_{\theta_1}L_{\theta_1}^T$ , where $L_{\theta_1}$ : 4 - 400Tanh - 400Tanh - 400Tanh - 3Linear
488
+ - $V_{\theta_2}$ : 4 - 300Tanh - 300Tanh - 1Linear
489
+ - $g_{\theta_3}$ : 4 - 300Tanh - 300Tanh - 2Linear
490
+
491
+ # B SPECIAL CASE OF ENERGY-BASED CONTROLLER - PD CONTROLLER WITH ENERGY COMPENSATION
492
+
493
+ The energy-based controller has the form $\mathbf{u}(\mathbf{q},\mathbf{p}) = \beta (\mathbf{q}) + \mathbf{v}(\mathbf{p})$ , where the potential energy shaping term $\beta (\mathbf{q})$ and the damping injection term $\mathbf{v}(\mathbf{p})$ are given by Equation (7) and Equation (8), respectively.
494
+
495
+ If the desired potential energy $V_{d}(\mathbf{q})$ is given by a quadratic, as in Equation (9), then
496
+
497
+ $$
498
+ \begin{array}{l} \boldsymbol {\beta} (\mathbf {q}) = \mathbf {g} ^ {T} (\mathbf {g} \mathbf {g} ^ {T}) ^ {- 1} \left(\frac {\partial V}{\partial \mathbf {q}} - \frac {\partial V _ {d}}{\partial \mathbf {q}}\right) \\ = \mathbf {g} ^ {T} \left(\mathbf {g} \mathbf {g} ^ {T}\right) ^ {- 1} \left(\frac {\partial V}{\partial \mathbf {q}} - \mathbf {K} _ {p} (\mathbf {q} - \mathbf {q} ^ {\star})\right), \tag {28} \\ \end{array}
499
+ $$
500
+
501
+ and the controller can be expressed as
502
+
503
+ $$
504
+ \mathbf {u} (\mathbf {q}, \mathbf {p}) = \boldsymbol {\beta} (\mathbf {q}) + \mathbf {v} (\mathbf {p}) = \mathbf {g} ^ {T} \left(\mathbf {g} \mathbf {g} ^ {T}\right) ^ {- 1} \left(\frac {\partial V}{\partial \mathbf {q}} - \mathbf {K} _ {p} (\mathbf {q} - \mathbf {q} ^ {\star}) - \mathbf {K} _ {d} \mathbf {p}\right). \tag {29}
505
+ $$
506
+
507
+ The corresponding external forcing term is then given by
508
+
509
+ $$
510
+ \mathbf {g} (\mathbf {q}) \mathbf {u} = \frac {\partial V}{\partial \mathbf {q}} - \mathbf {K} _ {p} \left(\mathbf {q} - \mathbf {q} ^ {\star}\right) - \mathbf {K} _ {d} \mathbf {p}, \tag {30}
511
+ $$
512
+
513
+ which is the same as Equation (10) in the main body of the paper. The first term in this external forcing provides an energy compensation, whereas the second and last terms are proportional and derivative control terms, respectively. Thus, this controller can be perceived as a PD controller with an additional energy compensation.
514
+
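+ As a concrete sketch (not the exact implementation used in our experiments), Equation (29) can be transcribed into NumPy as follows, where `dVdq` and `g_net` stand in for the learned quantities and `Kp`, `Kd` are user-chosen gain matrices:
+
+ ```python
+ import numpy as np
+
+ def energy_based_control(q, p, q_star, Kp, Kd, dVdq, g_net):
+     # u = g^T (g g^T)^{-1} (dV/dq - Kp (q - q*) - Kd p), cf. Equation (29).
+     g = g_net(q)                                # input matrix g(q), shape (n, m)
+     rhs = dVdq(q) - Kp @ (q - q_star) - Kd @ p  # desired external forcing, shape (n,)
+     return g.T @ np.linalg.solve(g @ g.T, rhs)  # action u, shape (m,)
+ ```
+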
515
+ # C ABLATION STUDY OF DIFFERENTIABLE ODE SOLVER
516
+
517
+ In Hamiltonian Neural Networks (HNN), Greydanus et al. (2019) incorporate the Hamiltonian structure into learning by minimizing the difference between the symplectic gradients and the true gradients. When the true gradients are not available, which is often the case, the authors suggest using finite-difference approximations. In SymODEN, true gradients or gradient approximations are not necessary, since we integrate the estimated gradient using a differentiable ODE solver and set up the loss function with the integrated values. Here we perform an ablation study of the differentiable ODE solver.
518
+
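+ Conceptually, the two training losses can be sketched as follows (a simplified single-degree-of-freedom sketch, assuming a PyTorch Hamiltonian network `hnet` and the `torchdiffeq` package for the differentiable solver):
+
+ ```python
+ import torch
+ from torchdiffeq import odeint  # differentiable ODE solver
+
+ def symplectic_grad(hnet, x):
+     # x = (q, p); the symplectic gradient is (dH/dp, -dH/dq).
+     if not x.requires_grad:
+         x = x.clone().requires_grad_(True)
+     dH = torch.autograd.grad(hnet(x).sum(), x, create_graph=True)[0]
+     return torch.cat([dH[..., 1:2], -dH[..., 0:1]], dim=-1)
+
+ def hnn_loss(hnet, x, dx_approx):
+     # HNN: match the symplectic gradient to (finite-difference) targets.
+     return ((symplectic_grad(hnet, x) - dx_approx) ** 2).mean()
+
+ def symoden_loss(hnet, x0, x_true, t):
+     # Unstructured SymODEN: integrate the learned field, then match states.
+     f = lambda t_, x_: symplectic_grad(hnet, x_)
+     x_hat = odeint(f, x0, t, method='rk4')
+     return ((x_hat - x_true) ** 2).mean()
+ ```
+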
519
+ Both HNN and Unstructured SymODEN approximate the Hamiltonian with a neural network; the main difference is the differentiable ODE solver, so we compare the performance of these two models. We set the time horizon $\tau = 1$, since it naturally corresponds to a finite-difference estimate of the gradient. A larger $\tau$ would correspond to higher-order estimates of the gradients. Since there is no angle-aware design in HNN, we use Task 1 to compare the performance of the two models.
520
+
521
+ We generate 25 training trajectories, each of which contains 45 time steps. This is consistent with the HNN paper. In the HNN paper (Greydanus et al., 2019), the initial conditions of the trajectories are generated randomly in an annulus, whereas in this paper, we generate the initial state conditions uniformly in a reasonable range in each state dimension. We conjecture that the authors of HNN chose the annulus data generation because their model has no angle-aware design. Take the pendulum as an example: none of the training and test trajectories they generate pass the inverted position. If the model makes predictions on a trajectory with a large enough initial speed, the angle would exceed $\pm 2\pi$, $\pm 4\pi$, etc. in the long run. Since these values are far from the region where the model was trained, we can expect the predictions to be poor. In fact, this motivated us to design the angle-aware SymODEN in Section 3.3. In this ablation study, we generate the training data in both ways.
522
+
523
+ Table 1 shows the train error and the prediction error per trajectory of the two models. We can see that Unstructured SymODEN performs better than HNN. This is an expected result. To see why this is the case, let us assume the training loss per time step of HNN is similar to that of Unstructured SymODEN. Since the training loss is on the symplectic gradient, the error accumulates while integrating the symplectic gradient to get the estimated state values, so the MSE of the state values would likely be one order of magnitude greater than that of Unstructured SymODEN.
524
+
525
+ ![](images/c4843cb365ccacd1f37c1c7d4ef8063d1a12b4b1ba29a84a1215c1dd60e007a5.jpg)
526
+ Figure 7: MSE and total energy of a sample test trajectory. Left two figures: the training data for the models are randomly generated in an annulus, as in HNN. Right two figures: the training data are randomly generated in a rectangle, the same way we generate data for SymODEN.
527
+
528
+ ![](images/fe178b615eb0c3f6814536b2b3993fc5c023f4df7f8735460d45ad610dddee6d.jpg)
529
+
530
+ ![](images/12e5cbd68daecc71be9dc7af7313ba699f6fa070b767a5f69a9e235d3e02fd53.jpg)
531
+
532
+ ![](images/6d8f8fe51ddda9a71bd66e7173f12da106cdb9b8fe8d324f7b34ae08564b7ffb.jpg)
533
+
534
+ Figure 7 shows the MSE and total energy of a particular trajectory. It is clear that the MSE of Unstructured SymODEN is lower than that of HNN. The fact that the MSE of HNN periodically touches zero does not mean it makes a good prediction at those time steps. Since the trajectories in the phase space are closed circles, those zeros mean the predicted trajectory of HNN lags behind (or runs ahead of) the true trajectory by one or more full circles. Also, the energy of the HNN trajectory drifts instead of staying constant, probably because the finite-difference approximation is not accurate enough.
535
+
536
+ Table 1: Train error and prediction error per trajectory of Unstructured SymODEN and HNN. The train error per trajectory is the sum of the MSE over all 45 time steps, averaged over the 25 training trajectories. The prediction error per trajectory is the sum of the MSE over 90 time steps in a trajectory.
537
+
538
+ <table><tr><td rowspan="2">Models</td><td colspan="2">annulus training data</td><td colspan="2">rectangle training data</td></tr><tr><td>train error</td><td>prediction error</td><td>train error</td><td>prediction error</td></tr><tr><td>Unstructured SymODEN</td><td>56.59</td><td>440.78</td><td>502.60</td><td>4363.87</td></tr><tr><td>HNN</td><td>290.67</td><td>564.16</td><td>5457.80</td><td>26209.17</td></tr></table>
539
+
540
+ # D EFFECTS OF THE TIME HORIZON $\tau$
541
+
542
+ Incorporating the differentiable ODE solver also introduces two hyperparameters: the solver type and the time horizon $\tau$. For the solver type, the Euler solver is not accurate enough for our tasks. The adaptive solver "dopri5" leads to similar train, test, and prediction errors as the RK4 solver, but requires more time during training. Thus, in our experiments, we choose RK4.
543
+
544
+ The time horizon $\tau$ is the number of integrated points we use to construct our loss function. Table 2 shows the train, test, and prediction errors per trajectory in Task 2 when $\tau$ is varied from 1 to 5. We can see that longer time horizons lead to better models. This is expected, since longer time horizons penalize poor long-term predictions more heavily. We also observe in our experiments that longer time horizons require more time to train the models.
545
+
546
+ Table 2: Train error, test error and prediction error per trajectory of Task 2
547
+
548
+ <table><tr><td>Time Horizon</td><td>τ = 1</td><td>τ = 2</td><td>τ = 3</td><td>τ = 4</td><td>τ = 5</td></tr><tr><td>Train Error</td><td>0.744</td><td>0.136</td><td>0.068</td><td>0.033</td><td>0.017</td></tr><tr><td>Test Error</td><td>0.579</td><td>0.098</td><td>0.052</td><td>0.024</td><td>0.012</td></tr><tr><td>Prediction Error</td><td>3.138</td><td>0.502</td><td>0.199</td><td>0.095</td><td>0.048</td></tr></table>
549
+
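+ In terms of the sketch from Appendix C, varying $\tau$ simply changes the length of the time grid passed to the differentiable solver (again assuming `torchdiffeq`):
+
+ ```python
+ import torch
+ from torchdiffeq import odeint  # differentiable ODE solver
+
+ def horizon_loss(f, x0, x_true, dt, tau):
+     # Loss over tau integrated steps; a longer tau penalizes long-term error.
+     t = torch.arange(tau + 1, dtype=torch.float32) * dt
+     x_hat = odeint(f, x0, t, method='rk4')   # (tau + 1, ...) predicted states
+     return ((x_hat[1:] - x_true[1:]) ** 2).mean()
+ ```
+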
550
+ # E FULLY-ACTUATED CARTPOLE AND ACROBOT
551
+
552
+ CartPole and Acrobot are underactuated systems. Incorporating the control of underactuated systems into the end-to-end learning framework is left for future work. Here we trained SymODEN on fully-actuated versions of CartPole and Acrobot and synthesized controllers based on the learned models.
553
+
554
+ For the fully-actuated CartPole, Figure 8 shows snapshots of a controlled trajectory from an initial condition where the pole is below the horizontal. Figure 9 shows the time series of the state variables and control inputs.
555
+
556
+ We can successfully learn the dynamics and control the pole to the inverted position and the cart to the origin.
557
+
558
559
+
560
+ ![](images/442d45a7400cb038956e8217919c6982a84dac783e3fd190d1cc9dcc6b9c532b.jpg)
561
+ Figure 8: Snapshots of a controlled trajectory of the fully-actuated CartPole system with a 0.3s time interval.
562
+
563
+ ![](images/20bf8b07edb5bc3929c8c94bacb5d00f44a16ec9715219d9147b828b8213a5ce.jpg)
564
+
565
+ ![](images/f464d389c2eb95c68cb9df1e4a94946296955d338144e27ac235db0330a62283.jpg)
566
+ Figure 9: Time series of state variables and control inputs of the controlled trajectory shown in Figure 8. Black reference lines indicate the expected final values.
567
+
568
+ For the fully-actuated Acrobot, Figure 10 shows the snapshots of a controlled trajectory. Figure 11 shows the time series of state variables and control inputs. We can successfully control the Acrobot from the downward position to the upward position, though the final value of $q_{2}$ deviates slightly from zero. Given that the dynamics were learned from only 64 different initial state conditions, it is likely that the upward position did not appear in the training data.
569
+
570
+ ![](images/835d4d15c48ba5acf6e85e2d855f13f8eb35ce8c496dba77bf2a41b6f793cf68.jpg)
571
+ Figure 10: Snapshots of a controlled trajectory of the fully-actuated Acrobot system with a 1s time interval.
572
+
573
+ ![](images/ba73c9d9e9aa09c93d14f0906caaa9758acf44b4855a2e635a61e470501e9ec1.jpg)
574
+
575
+ ![](images/965330ed0e98b7adcfdeace45bac639dcc94a1fb420e6610dc2be1aa7689b678.jpg)
576
+
577
+ ![](images/9ed0b75868a4398c61b50fca94c03d8860fdc36b2fe0e0c776af109db15c6f94.jpg)
578
+
579
+ ![](images/762010b0746490ad0094440f77587d82794e8e02f3f1ee6b0c9c2fe8cc9e67fa.jpg)
580
+
581
+ ![](images/8a7e8208e36a90bdb6fdfbd1e48bd89c2d280e6b904ac73545ba0293365dec6f.jpg)
582
+
583
+ ![](images/7123288829d9b2f7770f31e98b2eb0759c33a134506c02b4261c8b2e16386f86.jpg)
584
+
585
+ ![](images/e89e649d256a63f1c4434130cc8400281e4a0c06a595b6adced95b9a0f84ddb5.jpg)
586
+
587
+ ![](images/421523db1cca6177b514456113bcd1fdb2b22c8cb64e2cf0110033d32574ab34.jpg)
588
+
589
+ ![](images/a25a2a8613ef924ed9235e8b842b8ee69cb010f012a7ac9d0f5a47d245080280.jpg)
590
+
591
+ ![](images/59e2c609e8feb5e2a27b98f2ca6054f5f25ace4a24e836653d89c5a27e731036.jpg)
592
+ Figure 11: Time series of state variables and control inputs of the controlled trajectory shown in Figure 10. Black reference lines indicate the expected final values.
593
+
594
+ ![](images/c144017edd9cc6f268502427dc82029cbbf6164d727c825922e14d4ea2f4cc8a.jpg)
595
+
596
+ ![](images/37ae84782b3d28977a9bb3cdb5fc7c6de975a151a319ed86de9247cb9503d984.jpg)
597
+
598
+ # F TEST ERRORS OF THE TASKS
599
+
600
+ Here we show statistics of the train, test, and prediction errors per trajectory for all four tasks. The train errors are based on 64 initial state conditions and 5 constant inputs. The test errors are based on 64 previously unseen initial state conditions and the same 5 constant inputs. Each trajectory in the train and test sets contains 20 steps. The prediction error is based on the same 64 initial state conditions as during training, with zero inputs.
601
+
602
+ Table 3: Train, Test and Prediction errors of the Four Tasks
603
+
604
+ <table><tr><td></td><td>Naive Baseline</td><td>Geometric Baseline</td><td>Unstructured SymODEN</td><td>SymODEN</td></tr><tr><td colspan="5">Task 1: Pendulum</td></tr><tr><td>Model Parameters</td><td>0.36M</td><td>N/A</td><td>0.20M</td><td>0.13M</td></tr><tr><td>Train error</td><td>30.82 ± 43.45</td><td>N/A</td><td>0.89 ± 2.76</td><td>1.50 ± 4.17</td></tr><tr><td>Test error</td><td>40.99 ± 56.28</td><td>N/A</td><td>2.74 ± 9.94</td><td>2.34 ± 5.79</td></tr><tr><td>Prediction error</td><td>37.87 ± 117.02</td><td>N/A</td><td>17.17 ± 71.48</td><td>23.95 ± 66.61</td></tr><tr><td colspan="5">Task 2: Pendulum (embed)</td></tr><tr><td>Model Parameters</td><td>0.65M</td><td>0.46M</td><td>0.39M</td><td>0.14M</td></tr><tr><td>Train error</td><td>2.31 ± 3.72</td><td>0.59 ± 1.634</td><td>1.76 ± 3.69</td><td>0.067 ± 0.276</td></tr><tr><td>Test error</td><td>2.18 ± 3.59</td><td>0.49 ± 1.762</td><td>1.41 ± 2.82</td><td>0.052 ± 0.241</td></tr><tr><td>Prediction error</td><td>317.21 ± 521.46</td><td>14.31 ± 29.54</td><td>3.69 ± 7.72</td><td>0.20 ± 0.49</td></tr><tr><td colspan="5">Task 3: CartPole</td></tr><tr><td>Model Parameters</td><td>1.01M</td><td>0.82M</td><td>0.67M</td><td>0.51M</td></tr><tr><td>Train error</td><td>15.53 ± 22.52</td><td>0.45 ± 0.37</td><td>4.84 ± 4.42</td><td>1.78 ± 1.81</td></tr><tr><td>Test error</td><td>25.42 ± 38.49</td><td>1.20 ± 2.67</td><td>6.90 ± 8.66</td><td>1.89 ± 1.81</td></tr><tr><td>Prediction error</td><td>332.44 ± 245.24</td><td>52.26 ± 73.25</td><td>225.22 ± 194.24</td><td>11.41 ± 16.06</td></tr><tr><td colspan="5">Task 4: Acrobot</td></tr><tr><td>Model Parameters</td><td>1.46M</td><td>0.97M</td><td>0.78M</td><td>0.51M</td></tr><tr><td>Train error</td><td>2.04 ± 2.90</td><td>2.07 ± 3.72</td><td>1.32 ± 2.08</td><td>0.25 ± 0.39</td></tr><tr><td>Test error</td><td>5.62 ± 9.29</td><td>5.12 ± 7.25</td><td>3.33 ± 6.00</td><td>0.28 ± 0.48</td></tr><tr><td>Prediction error</td><td>64.61 ± 145.20</td><td>26.68 ± 34.90</td><td>9.72 ± 16.58</td><td>2.07 ± 5.26</td></tr></table>
symplecticodenetlearninghamiltoniandynamicswithcontrol/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25a12b3d0c42b4177656be814dd219979930bfc1730ba26559da9f2561c19987
3
+ size 799656
symplecticodenetlearninghamiltoniandynamicswithcontrol/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:549723a9cc89e99668638bbe860d955c160c2b7e655a26a6f4c7fff0e575bd91
3
+ size 725778
synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba8de167d69f0c5db1076ebc705a03d30219e26acdb6201ff03f7d20a881eaf7
3
+ size 131828
synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:928b2791efeab0a71a5eb4d12fbcc20bc5402cf678506d348e3bd8d8ea939170
3
+ size 154402
synthesizingprogrammaticpoliciesthatinductivelygeneralize/aab0d129-5e56-4707-9e10-54a49eb1f2d9_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6878965b165701bf37fb02dab66edab4fc952b3e67f4dccd2d37061860edc3b8
3
+ size 961407
synthesizingprogrammaticpoliciesthatinductivelygeneralize/full.md ADDED
@@ -0,0 +1,549 @@
 
 
 
 
1
+ # SYNTHESIZING PROGRAMMATIC POLICIES THAT INDUCTIVELY GENERALIZE
2
+
3
+ Jeevana Priya Inala
4
+
5
+ MIT CSAIL
6
+
7
+ jinala@csail.mit.edu
8
+
9
+ Osbert Bastani
10
+
11
+ University of Pennsylvania
12
+
13
+ obastani@seas.upenn.edu
14
+
15
+ Zenna Tavares
16
+
17
+ MIT CSAIL
18
+
19
+ zenna@mit.edu
20
+
21
+ Armando Solar-Lezama
22
+
23
+ MIT CSAIL
24
+
25
+ asolar@csail.mit.edu
26
+
27
+ # ABSTRACT
28
+
29
+ Deep reinforcement learning has successfully solved a number of challenging control tasks. However, learned policies typically have difficulty generalizing to novel environments. We propose an algorithm for learning programmatic state machine policies that can capture repeating behaviors. By doing so, they have the ability to generalize to instances requiring an arbitrary number of repetitions, a property we call inductive generalization. However, state machine policies are hard to learn since they consist of a combination of continuous and discrete structures. We propose a learning framework called adaptive teaching, which learns a state machine policy by imitating a teacher; in contrast to traditional imitation learning, our teacher adaptively updates itself based on the structure of the student. We show that our algorithm can be used to learn policies that inductively generalize to novel environments, whereas traditional neural network policies fail to do so.
30
+
31
+ # 1 INTRODUCTION
32
+
33
+ Existing deep reinforcement learning (RL) approaches have difficulty generalizing to novel environments (Packer et al., 2018; Rajeswaran et al., 2017). More specifically, consider a task that requires performing a repeating behavior—we would like to be able to learn a policy that generalizes to instances requiring an arbitrary number of repetitions. We refer to this property as inductive generalization. In supervised learning, specialized neural network architectures have been proposed that exhibit inductive generalization on tasks such as list manipulation (Cai et al., 2017), but it is not obvious how those techniques would generalize to the control problems discussed in this paper. Alternatively, algorithms have been proposed for learning programmatic policies for control problems that generalize better than traditional neural network policies (Verma et al., 2019; 2018), but existing approaches have focused on simple stateless policies that make learning generalizable repetitive behaviors hard, e.g., a stateless program cannot internally keep track of the number of repetitions made so far and decide the next action based on that progress.
34
+
35
+ We propose an algorithm for learning programmatic state machine policies. Such a policy consists of a set of internal states, called modes, each of which is associated with a controller that is applied while in that mode. The policy also includes transition predicates that describe how the mode is updated. These policies are sufficiently expressive to capture tasks of interest—e.g., they can perform repeating tasks by cycling through some subset of modes during execution. Additionally, state machine policies are strongly biased towards policies that inductively generalize, a bias that deep RL policies lack. In other words, this policy class is both realizable (i.e., it contains a "right" policy that solves the problem for all environments) and identifiable (i.e., we can learn the right policy from limited data).
36
+
37
+ However, state machine policies are challenging to learn because their discrete state transitions make it difficult to use gradient-based optimization. One standard solution is to "soften" the state transitions by making them probabilistic. However, these techniques alone are insufficient; they still run into local optima due to the constraints on the structure of the policy function, as well as the relatively few parameters they possess.
38
+
39
+ ![](images/c1555061b871ac5c402d5ae333945f43bda0c1dd471060b9e6c5502c317f15af.jpg)
40
+
41
+ ![](images/7f8e32048cbd3c5791440482e5369662fb7653fcd68b32fc096082bd499505a9.jpg)
42
+
43
+ ![](images/e0e22411e539734705da26e40621105ca0d2ae4a70f77f0d4e128b6786ab6c44.jpg)
44
+
45
+ ![](images/78130a529ef01c68375f13b63c5aa9cb86e98cdd0094d0c6b8358188a0bafcf5.jpg)
46
+
47
+ ![](images/2975b49f122c2fb3c388a95c5883f42ca773eaea745a345bdb4c69e34631b289.jpg)
48
+ (a) Train 1
49
+ (b) Train 2
50
+ (c) Train 3
51
+ (d) Test
52
+ (e) State machine based policy. False edges are dropped.
53
+ Figure 1: Running example: retrieving an autonomous car from tight parking spots. The goal is to learn a state-machine policy (e) that is trained on scenarios (a), (b), and (c), and generalizes to scenario (d).
54
+
55
+ To address this issue, we propose an approach called adaptive teaching, which alternately learns a teacher and a student. The teacher is an over-parameterized version of the student, which is a state-machine policy trained to mimic the teacher. Because the teacher is over-parameterized, it can be easily learned using model-based numerical optimization (but does not generalize as well as the student). Furthermore, our approach is different from traditional imitation learning (Schaal, 1999; Ross et al., 2011), since the teacher is regularized to favor strategies similar to the ones taken by the student, to ensure the student can successfully mimic the teacher. As the student improves, the teacher improves as well. This alternating optimization can naturally be derived within the framework of variational inference, where the teacher encodes the variational distribution (Wainwright et al., 2008).
56
+
57
+ We implement our algorithm and evaluate it on a set of reinforcement learning problems focused on tasks that require inductive generalization. We show that traditional deep RL approaches perform well on the original task, but fail to generalize inductively, whereas our state machine policies successfully generalize beyond the training distribution.
58
+
59
+ We emphasize that we do not focus on problems that require large state-machines, which is a qualitatively different problem from ours and would require different algorithms to solve. We believe that state-machines are most useful when only a few modes are required. In particular, we are interested in problems where a relatively simple behavior must be repeated a certain number of times to solve the given task. The key premise behind our approach, as shown by our evaluation, is that, in these cases, compact state-machines can represent policies that both have good performance and are generalizable. In fact, our algorithm solves all of our benchmarks using state-machine policies with at most 4 modes. When many modes are needed, then the number of possible transition structures grows exponentially, making it difficult to learn the "right" structure without having an exponential amount of training data.
60
+
61
+ Example. Consider the autonomous car in Figure 1, which consists of a blue car (the agent) parked between two stationary black cars. The system state is $(x,y,\theta ,d)$ , where $(x,y)$ is the center of the car, $\theta$ is the orientation, and $d$ is the distance between the two black cars. The actions are $(v,\psi)$ , where $v$ is the velocity and $\psi$ is the steering angle (we consider velocity control since the speed is low).
62
+
63
+ The dynamics are standard bicycle dynamics. The goal is to drive out of the parking spot to an adjacent lane while avoiding collisions. This task is easy when $d$ is large (Figure 1a). It is somewhat more involved when $d$ is small, since it requires multiple maneuvers (Figures 1b and 1c). However, it becomes challenging when $d$ is very small (Figure 1d). A standard RL algorithm will train a policy that performs well on the distances seen during training but does not generalize to smaller distances. In contrast, our goal is to train an agent on scenarios (a), (b), and (c) that generalizes to scenario (d).
64
+
65
+ In Figure 1e, we show a state machine policy synthesized by our algorithm for this task. We use $d_{f}$ and $d_{b}$ to denote the distances between the agent and the front and back black cars, respectively. This policy has three different modes (besides a start mode $m_{s}$ and an end mode $m_e$ ). Roughly speaking, this policy says (i) immediately shift from mode $m_{s}$ to $m_1$ , and drive the car forward and to the left, (ii) continue until close to the car in front; then, transition to mode $m_2$ , and drive the car backwards and to the right, (iii) continue until close to the car behind; then, transition back to mode $m_1$ , (iv) iterate between $m_1$ and $m_2$ until the car can safely exit the parking spot; then, transition to mode $m_3$ , and drive forward and to the right to make the car parallel to the lane. This policy inductively generalizes since it captures the iterative behavior of driving forward and then backward until exiting the parking spot. Thus, it successfully solves the scenario in Figure 1d.
66
+
67
+ Related work. There has been growing interest in using program synthesis to aid machine learning (Lake et al., 2015; Ellis et al., 2015; 2018; Valkov et al., 2018; Young et al., 2019). Our work is most closely related to recent work using imitation learning to learn programmatic policies (Verma et al., 2018; Bastani et al., 2018; Zhu et al., 2019; Verma et al., 2019). These approaches use a neural network policy as the teacher. However, they are focused on learning stateless policies and hence, they use a supervised dataset of state-action pairs from the teacher and a domain-specific program synthesizer to learn programmatic policies. Building such a synthesizer for state machine policies is challenging since they contain both discrete and continuous parameters and internal state. The student in our algorithm needs to learn the state-machine policy from entire "trajectory traces" to learn the internal state. In particular, each trajectory trace consists of the sequence of states and actions from the initial state to the goal state visited by the teacher, but also encodes which states correspond to mode changes for the teacher. In the teacher's iteration, the teacher's mode changes are regularized to align more closely with the possible student mode changes. As a consequence, in the student's iteration, it is easier for the student to mimic the teacher's mode changes. Leveraging this connection between the teacher structure and student structure is critical for us to be able to learn state-machine policies. Additionally, with the exception of (Verma et al., 2019), for the other approaches, there is no feedback from the student to the teacher.
68
+
69
+ State machines have been previously used to represent policies that have internal state (typically called memory). To learn these policies, gradient ascent methods assume a fixed structure and optimize over real-valued parameters (Meuleau et al., 1999; Peshkin et al., 2001; Aberdeen & Baxter, 2002), whereas policy iteration methods use dynamic programming to extend the structure (Hansen, 1998). Our method combines both, but similarly to Poupart & Boutilier (2004), the structure space is bounded. In addition, programmatic state machines use programs to represent state transitions and action rules, and as a result can perform well while remaining small in size. Hierarchies of Abstract Machines (HAMs) also use programmatic state machines for hierarchical reinforcement learning, but assume a fixed, hand-designed structure (Parr & Russell, 1998; Andre & Russell, 2002).
70
+
71
+ Our inductive generalization goal is related to that of meta-learning (Finn et al., 2017); however, whereas meta-learning trains on a few examples from the novel environment, our goal is to generalize without additional training. Our work is also related to guided policy search, which uses a teacher in the form of a trajectory optimizer to train a neural network student (Levine & Koltun, 2013). However, training programmatic policies is more challenging since the teacher must mirror the structure of the student. Finally, it has recently been shown that over-parameterization is essential in helping neural networks avoid local minima (Allen-Zhu et al., 2019). Relaxing optimization problems by adding more parameters is a well-established technique; in many cases, re-parameterization can make difficult non-convex problems efficiently solvable (Carlone & Calafiore, 2018).
72
+
73
+ # 2 PROBLEM FORMULATION
74
+
75
+ Dynamics. We are interested in synthesizing control policies for deterministic, continuous-time dynamical systems with continuous state and action spaces. In particular, we consider partially observable Markov decision processes (POMDP) $\langle \mathcal{X},\mathcal{A},\mathcal{O},F,Z,X_0,\phi_S,\phi_G\rangle$ with states $\mathcal{X}\subseteq \mathbb{R}^{d_X}$ , actions $\mathcal{A}\subseteq \mathbb{R}^{d_A}$ , observations $\mathcal{O}\subseteq \mathbb{R}^{d_O}$ , deterministic dynamics $F:\mathcal{X}\times \mathcal{A}\to \mathcal{X}$ (i.e., $\dot{\mathbf{x}} = F(\mathbf{x},\mathbf{a})$ ), deterministic observation function $Z:\mathcal{X}\rightarrow \mathcal{O}$ , and initial state distribution $\mathbf{x}_0\sim X_0$ .
76
+
77
+ We consider a safety specification $\phi_S: \mathcal{X} \to \mathbb{R}$ and a goal specification $\phi_G: \mathcal{X} \to \mathbb{R}$ . Then, the agent aims to reach a goal state $\phi_G(\mathbf{x}) \leq 0$ while staying in safe states $\phi_S(\mathbf{x}) \leq 0$ . A positive value for $\phi_S(\mathbf{x})$ (resp., $\phi_G(\mathbf{x}))$ quantifies the degree to which $\mathbf{x}$ is unsafe (resp., away from the goal).
78
+
79
+ Policies. We consider policies with internal memory $\pi : \mathcal{O} \times \mathcal{S} \to \mathcal{A} \times \mathcal{S}$ where $\mathcal{S} \subseteq \mathbb{R}^{d_S}$ is the set of internal states; we assume the memory is initialized to a constant $\mathbf{s}_0$ . Given such a policy $\pi$ , we sample a rollout (or trajectory) $\tau = (\mathbf{x}_0, \mathbf{x}_1, \dots, \mathbf{x}_N)$ with horizon $N \in \mathbb{N}$ by sampling $\mathbf{x}_0 \sim X_0$ and then performing a discrete-time simulation $\mathbf{x}_{n+1} = \mathbf{x}_n + F(\mathbf{x}_n, \mathbf{a}_n) \cdot \Delta$ , where $(\mathbf{a}_n, \mathbf{s}_{n+1}) = \pi(Z(\mathbf{x}_n), \mathbf{s}_n)$ and $\Delta \in \mathbb{R}_{>0}$ is the time increment. Since $F$ , $Z$ , and $\pi$ are deterministic, $\tau$ is fully determined by $\mathbf{x}_0$ and $\pi$ ; $\tau$ can also be represented as a list of actions combined with the initial state, i.e., $\tau = \langle \mathbf{x}_0, (\mathbf{a}_0, \mathbf{a}_1, \dots, \mathbf{a}_N) \rangle$ .
80
+
81
+ The degree to which $\phi_S$ and $\phi_G$ are satisfied along a trajectory is quantified by a reward function $R(\pi, \mathbf{x}_0) = -\phi_G(\mathbf{x}_N)^+ - \sum_{n=0}^{N} \phi_S(\mathbf{x}_n)^+$ , where $x^+ = \max(0, x)$ . The optimal policy $\pi^*$ in some policy class $\Pi$ is one which maximizes the expected reward $\mathbb{E}_{\mathbf{x}_0 \sim X_0}[R(\pi, \mathbf{x}_0)]$ .
82
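+ For instance, this reward can be transcribed directly (a sketch; `phi_S` and `phi_G` are the specification functions and `traj` the list of visited states):
+
+ ```python
+ def reward(traj, phi_S, phi_G):
+     # R(pi, x0) = -[phi_G(x_N)]^+ - sum_n [phi_S(x_n)]^+, with x^+ = max(0, x).
+     pos = lambda v: max(0.0, v)
+     return -pos(phi_G(traj[-1])) - sum(pos(phi_S(x)) for x in traj)
+ ```
+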
+
83
+ Inductive generalization. Beyond optimizing reward, we want a policy that inductively generalizes to unseen environments. Formally, we consider two initial state distributions: a training distribution $X_0^{\text{train}}$ , and a test distribution $X_0^{\text{test}}$ that includes extreme states never encountered during training. Then, the goal is to train a policy according to $X_0^{\text{train}}$ —i.e.,
84
+
85
+ $$
86
+ \pi^ {*} = \underset {\pi \in \Pi} {\arg \max } \mathbb {E} _ {\mathbf {x} _ {0} \sim X _ {0} ^ {\text {t r a i n}}} [ R (\pi , \mathbf {x} _ {0}) ], \tag {1}
87
+ $$
88
+
89
+ but measure its performance according to $X_0^{\mathrm{test}}$ -i.e., $\mathbb{E}_{\mathbf{x}_0}\sim X_0^{\mathrm{test}}[R(\pi ,\mathbf{x}_0)]$
90
+
91
+ # 3 PROGRAMMATIC STATE MACHINE POLICIES
92
+
93
+ To achieve inductive generalization, we aim to synthesize programmatic policies in the form of state machines. At a high level, state machines can be thought of as compositions of much simpler policies, where the internal state of the state machines (called its mode) indicates which simple policy is currently being used. Thus, state machines are capable of encoding complex nonlinear control tasks such as iteratively repeating a complex sequence of actions (e.g., the car example in Figure 1). At the same time, state machines are substantially more structured than more typical policy classes such as neural networks and decision trees.
94
+
95
+ More precisely, a state machine $\pi$ is a tuple $\langle \mathcal{M},\mathcal{H},\mathcal{G},m_s,m_e\rangle$ . The modes $m_{i}\in \mathcal{M}$ of $\pi$ are the internal memory of the state machine. Each mode $m_{i}\in \mathcal{M}$ corresponds to an action function $H_{m_i}\in \mathcal{H}$ , which is a function $H_{m_i}:\mathcal{O}\to \mathcal{A}$ mapping observations to actions. When in mode $m_{i}$ , the agent takes action $\mathbf{a} = H_{m_i}(\mathbf{o})$ . Furthermore, each pair of modes $(m_i,m_j)$ corresponds to a switching condition $G_{m_i}^{m_j}\in \mathcal{G}$ , which is a function $G_{m_i}^{m_j}:\mathcal{O}\rightarrow \mathbb{R}$ . When an agent in mode $m_{i}$ observes $\mathbf{o}$ such that $G_{m_i}^{m_j}(\mathbf{o})\geq 0$ , then the agent transitions from mode $m_{i}$ to mode $m_j$ . If there are multiple modes $m_j$ with non-negative switching weight $G_{m_i}^{m_j}(\mathbf{o})\geq 0$ , then the agent transitions to the one that is greatest in magnitude; if there are several modes of equal weight, we take the first one according to a fixed ordering. Finally, $m_s,m_e\in \mathcal{M}$ are the start and end modes, respectively; the state machine mode is initialized to $m_s$ , and the state machine terminates when it transitions to $m_e$ .
96
+
97
+ Formally, $\pi (\mathbf{o}_n,\mathbf{s}_n) = (\mathbf{a}_n,\mathbf{s}_{n + 1})$ , where $\mathbf{a}_n = H_{\mathbf{s}_n}(\mathbf{o}_n)$ , $\mathbf{s}_0 = m_s$ and
98
+
99
+ $$
100
+ \mathbf{s}_{n + 1} = \begin{cases} m^{*} = \operatorname{dargmax}_{m} G_{\mathbf{s}_n}^{m}(\mathbf{o}_n) & \text{if } G_{\mathbf{s}_n}^{m^{*}}(\mathbf{o}_n) \geq 0 \\ \mathbf{s}_n & \text{otherwise} \end{cases} \tag{2}
101
+ $$
102
+
103
+ where $\operatorname{dargmax}$ is a deterministic arg max that breaks ties as described above.
104
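+ A minimal sketch of this execution semantics (hypothetical dictionaries `H` of action functions and `G` of switching conditions, keyed by mode and mode pair, with `mode_order` the fixed tie-breaking order):
+
+ ```python
+ def step_policy(obs, mode, H, G, mode_order):
+     # Act according to the current mode: a_n = H_{s_n}(o_n).
+     action = H[mode](obs)
+     # Collect non-negative switching weights out of the current mode (Eq. 2).
+     eligible = [(G[(mode, m2)](obs), m2) for m2 in mode_order
+                 if (mode, m2) in G and G[(mode, m2)](obs) >= 0]
+     if eligible:
+         # dargmax: the largest weight wins; ties go to the first mode in order.
+         best = max(w for w, _ in eligible)
+         mode = next(m2 for w, m2 in eligible if w == best)
+     return action, mode
+ ```
+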
+
105
+ Action functions and switching conditions are specified by grammars that encode the space of possible functions as a space of programs. Different grammars can be used for different problems.
106
+
107
+ Typical grammars for action functions include constants $\{C_{\alpha}:\mathbf{o}\mapsto \alpha \}$ and proportional controls $\{P_{\alpha_0,\alpha_1}^i:\mathbf{o}\mapsto \alpha_0(\mathbf{o}[i] - \alpha_1)\}$ . A typical grammar for switching conditions is the grammar
108
+
109
+ $$
110
+ B := \left\{\mathbf {o} [ i ] \leq \alpha \right\} _ {i} \mid \left\{\mathbf {o} [ i ] \geq \alpha \right\} _ {i} \mid B _ {1} \wedge B _ {2} \mid B _ {1} \vee B _ {2}
111
+ $$
112
+
113
+ of Boolean predicates over the current observation $\mathbf{o}$ , where $\mathbf{o}[i]$ is the $i$ th component of $\mathbf{o}$ . In all these grammars, $\alpha_{i} \in \mathbb{R}$ are parameters to be learned. The grammar for switching conditions also has discrete parameters encoding the choice of expression. For example, in Figure 1, the action functions are constants, and the switching conditions are inequalities over components of $\mathbf{o}$ .
114
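+ As one of many possible encodings, these grammar elements can be realized as plain closures, with a predicate $\mathbf{o}[i] \leq \alpha$ encoded as the weight $\alpha - \mathbf{o}[i]$ (non-negative exactly when the predicate holds); this is an illustrative sketch, not the paper's implementation:
+
+ ```python
+ # Action-function grammar: constants and proportional controls.
+ const = lambda alpha: (lambda o: alpha)                  # C_alpha
+ prop = lambda i, a0, a1: (lambda o: a0 * (o[i] - a1))    # P^i_{a0, a1}
+
+ # Switching-condition grammar: o[i] <= alpha as a real-valued weight.
+ leq = lambda i, alpha: (lambda o: alpha - o[i])
+ geq = lambda i, alpha: (lambda o: o[i] - alpha)
+ ```
+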
+
115
+ # 4 FRAMEWORK FOR SYNTHESIZING PROGRAMMATIC POLICIES
116
+
117
+ We now describe the adaptive teaching framework for synthesizing state machine policies. In this section, the teacher is abstractly represented as a collection of trajectories $\tau_{\mathbf{x}_0}$ (i.e., an open-loop controller consisting of a fixed sequence of actions) for each initial state $\mathbf{x}_0$ . A key insight is that we can parameterize $\tau_{\mathbf{x}_0}$ in a way that mirrors the structure of the state machine student. As we discuss in Section 4.2, we parameterize $\tau_{\mathbf{x}_0}$ as a "loop-free" state machine. Intuitively, our algorithm efficiently computes $\tau_{\mathbf{x}_0}$ (from multiple initial states $\mathbf{x}_0$ ) using gradient-based optimization, and then "glues" them together using maximum likelihood to construct a state machine policy.
118
+
119
+ # 4.1 ADAPTIVE TEACHING VIA VARIATIONAL INFERENCE
120
+
121
+ We derive the adaptive teaching formulation by reformulating the learning problem in the framework of probabilistic reinforcement learning, and also consider policies $\pi$ that are probabilistic state machines (see Section 4.3). Then, we use a variational approach to break the problem into the teacher and the student steps. In this approach, the log-likelihood of a policy $\pi$ is defined as follows:
122
+
123
+ $$
124
+ \ell (\pi) = \log \mathbb {E} _ {p (\tau | \pi)} [ e ^ {\lambda R (\tau)} ] \tag {3}
125
+ $$
126
+
127
+ where $p(\tau \mid \pi)$ is the probability of sampling rollout $\tau$ when using policy $\pi$ from a random initial state $\mathbf{x}_0$ , $\lambda \in \mathbb{R}_{\geq 0}$ is a hyperparameter, and $R(\tau)$ is the reward assigned to $\tau$ . We have
128
+
129
+ $$
130
+ \ell (\pi) = \log \mathbb {E} _ {q (\tau)} \left[ e ^ {\lambda R (\tau)} \cdot \frac {p (\tau \mid \pi)}{q (\tau)} \right] \geq \mathbb {E} _ {q (\tau)} [ \lambda R (\tau) + \log p (\tau | \pi) - \log q (\tau) ] \tag {4}
131
+ $$
132
+
133
+ where $q(\tau)$ is the variational distribution and the inequality follows from Jensen's inequality. Thus, we can optimize $\pi$ by maximizing the lower bound Eq (4) on $\ell(\pi)$ . Since the first and third terms of Eq (4) are constant with respect to $\pi$ , we have
134
+
135
+ $$
136
+ \pi^ {*} = \underset {\pi} {\arg \max } \mathbb {E} _ {q (\tau)} [ \log p (\tau | \pi) ]. \tag {5}
137
+ $$
138
+
139
+ Next, the optimal choice for $q$ (i.e., to minimize the gap in the inequality in Eq (4)) is
140
+
141
+ $$
142
+ q ^ {*} = \underset {q} {\arg \min } D _ {\mathrm {K L}} (q (\tau) \| e ^ {\lambda R (\tau)} \cdot p (\tau \mid \pi) / Z) \tag {6}
143
+ $$
144
+
145
+ where $Z$ is a normalizing constant. We choose $q$ to have the form $q(\tau) = p(\mathbf{x}_0) \cdot \delta (\tau -\tau_{\mathbf{x}_0})$ , where $\delta$ is the Dirac delta function, $p(\mathbf{x}_0)$ is the initial state distribution, and $\tau_{\mathbf{x}_0}$ are the parameters to be optimized, where $\tau_{\mathbf{x}_0}$ encodes a trajectory from $\mathbf{x}_0$ . Then, up to constants, the objective of Eq (6) equals
146
+
147
+ $$
148
+ \mathbb {E} _ {p (\mathbf {x} _ {0})} \left[ \log p (\mathbf {x} _ {0}) + \mathbb {E} _ {\delta (\tau - \tau_ {\mathbf {x} _ {0}})} [ \log \delta (\tau - \tau_ {\mathbf {x} _ {0}}) ] - (\lambda R (\tau_ {\mathbf {x} _ {0}}) + \log p (\tau_ {\mathbf {x} _ {0}} | \pi , \mathbf {x} _ {0})) \right].
149
+ $$
150
+
151
+ The first term is constant; the second term is degenerate, but it is also constant. Thus, we have
152
+
153
+ $$
154
+ q ^ {*} = \underset {\{\tau_ {\mathbf {x} _ {0}} \}} {\arg \max } \mathbb {E} _ {p (\mathbf {x} _ {0})} \left[ \lambda R \left(\tau_ {\mathbf {x} _ {0}}\right) + \log p \left(\tau_ {\mathbf {x} _ {0}} \mid \pi , \mathbf {x} _ {0}\right) \right]. \tag {7}
155
+ $$
156
+
157
+ Thus, we can optimize Eq (3) by alternately optimizing Eq (5) and Eq (7).
158
+
159
+ We interpret these equations as adaptive teaching. At a high level, the teacher (i.e., the variational distribution $q^{*}$ in Eq (7)) is used to guide the optimization of the student (i.e., the state machine policy $\pi^{*}$ in Eq (5)). Rather than compute the teacher in closed form, we approximate it by sampling finitely many initial states $\mathbf{x}_0^k\sim X_0$ and then computing the optimal rollout from each $\mathbf{x}_0^k$ .
160
+
161
+ ![](images/49c10286f647ffed32acecb4d67b495585230319789199eff2bb4c2d44204e01.jpg)
162
+ Figure 2: Flowchart connecting the different components of the algorithm.
163
+
164
+ Formally, on the $i$th iteration, the teacher and student are updated as follows:
165
+
166
+ Teacher: $q_{i}^{*} = \sum_{k = 1}^{K}\delta (\tau_{k}^{i})$ (8)
167
+
168
+ where $\tau_k^i = \arg \max_{\tau} \left[\lambda R(\tau) + \log p(\tau \mid \pi^{i - 1},\mathbf{x}_0^k)\right]$ and $\mathbf{x}_0^k\sim X_0$ .
169
+
170
+ Student: $\pi_i^* = \arg \max_{\pi} \sum_{k=1}^{K} \log p(\tau_k^i \mid \pi, \mathbf{x}_0^k)$ (9)
171
+
172
+ The teacher objective Eq (8) is to both maximize the reward $R(\tau)$ from a random initial state $\mathbf{x}_0$ and to maximize the probability $p(\tau \mid \pi, \mathbf{x}_0)$ of obtaining the rollout $\tau$ from initial state $\mathbf{x}_0$ according to the current student $\pi$ . The latter encourages the teacher to match the structure of the student. Furthermore, the teacher is itself updated at each step to account for the changing structure of the student. The student objective Eq (9) is to imitate the distribution of rollouts according to the teacher. Figure 2 shows the different components of our algorithm.
173
+
174
+ # 4.2 TEACHER: COMPUTING LOOP-FREE POLICIES
175
+
176
+ We begin by describing how the teacher solves the trajectory optimization problem Eq (8)—i.e., computing $\tau_{k}$ for a given initial state $\mathbf{x}_0^k$ .
177
+
178
+ Parameterization. One approach is to parameterize $\tau$ as an arbitrary action sequence $(\mathbf{a}_0, \mathbf{a}_1, \ldots)$ and use gradient-based optimization to compute $\tau$ . However, this approach can perform poorly—even though we regularize $\tau$ towards the student, it could exhibit behaviors that are hard for the student to capture. Instead, we parameterize $\tau$ in a way that mirrors the student. In particular, we parameterize $\tau$ like a state machine, but rather than having modes and switching conditions that adaptively determine the sequence of action functions to be executed and the duration of execution, the sequence of action functions is fixed and each action function is executed for a fixed duration.
179
+
180
+ More precisely, we represent $\tau$ as a loop-free policy $\tau = \langle \mathcal{H},\mathcal{T}\rangle$ . To execute $\tau$ , each action function $H_{i}\in \mathcal{H}$ is applied for the corresponding duration $T_{i}\in \mathcal{T}$ , after which $H_{i + 1}$ is applied. The action functions come from the same grammar of action functions as for the student.
181
+
182
+ The obvious way to represent a duration $T_{i}$ is as a number of time steps $T_{i} \in \mathbb{N}$ . However, with this choice, we cannot use continuous optimization to optimize $T_{i}$ . Instead, we fix the number of discretization steps $P$ for which $H_{i}$ is executed, and vary the time increment $\Delta_{i} = T_{i} / P$ —i.e., $\mathbf{x}_{n + 1} \approx \mathbf{x}_n + F(\mathbf{x}_n, H_i(\mathbf{o})) \cdot \Delta_i$ . We enforce $\Delta_i \leq \Delta_{\max}$ for a small $\Delta_{\max}$ to ensure that the discrete-time approximation of the dynamics is sufficiently accurate.
183
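+ A sketch of executing such a loop-free policy under the discrete-time simulation of Section 2 (with dynamics `F` and observation function `Z` as defined there):
+
+ ```python
+ def rollout_loop_free(x0, F, Z, Hs, deltas, P):
+     # Apply each action function Hs[i] for P discretization steps,
+     # using its learned time increment deltas[i] = T_i / P.
+     x, traj = x0, [x0]
+     for H, dt in zip(Hs, deltas):
+         for _ in range(P):
+             a = H(Z(x))
+             x = x + F(x, a) * dt  # x_{n+1} = x_n + F(x_n, a_n) * Delta_i
+             traj.append(x)
+     return traj
+ ```
+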
+
184
+ Figure 3(a) and (d) show examples of loop-free policies for two different initial states and two different teacher iterations. The loop-free policies in (d) are regularized to match the student's state-machine policy learned in the previous iteration (shown in Figure 3(c)).
185
+
186
+ **Optimization.** We use model-based trajectory optimization to compute loop-free policies. The main challenge is handling the term $p(\tau \mid \pi, \mathbf{x}_0)$ in the objective.
187
+
188
+ ![](images/600c038c384a2bc4af72aafe8da4c4216b55261348e117d2a3bda4058ff952d5.jpg)
189
+ Figure 3: Visualization showing the student-teacher interaction for two iterations. (a) The loop-free policies (with their corresponding rewards) learned by the teacher for two different initial states. Here, the boxes signify the different segments in the loop-free policies, the colors signify different actions, and the lengths of the boxes signify the durations of the segments. (b) The mapping between the segments and the modes in the state-machine—i.e., $p(\mu = m_j)$. Each box shows the composition of modes, vertically distributed according to their probabilities. For example, the third segment in the loop-free policy for $\mathbf{x}_0^1$ has $p(\mu = \text{Green}) = 0.65$ and $p(\mu = \text{Brown}) = 0.35$. (c) The most probable rollouts from the state-machine policy learned by the student. Finally, (d), (e) and (f) are similar to (a), (b) and (c), but for the second iteration.
190
+
191
+ Symbolically computing this probability is hard because of the discrete-continuous structure of $\pi$. An alternative is to precompute the probabilities of all the trajectories $\tau$ that can be derived from $\pi$; however, this is also infeasible because the number of trajectories is unbounded. Thus, we perform trajectory optimization in two phases. First, we use a sampling-based optimization algorithm to obtain a set of good trajectories $\tau^1, \dots, \tau^L$. Then, we apply gradient-based optimization, replacing $p(\cdot \mid \pi, \mathbf{x}_0)$ with a term that regularizes $\tau$ to be close to $\{\tau^\ell\}_{\ell=1}^L$.
192
+
193
+ The first phase proceeds as follows: (i) sample $\tau^1, \dots, \tau^L$ using $\pi$ from $\mathbf{x}_0$ , and let $p^\ell$ be the probability of $\tau^\ell$ according to $\pi$ , (ii) sort these samples in decreasing order of the objective $p^\ell \cdot e^{\lambda R(\tau^\ell)}$ , and (iii) discard all but the top $\rho$ samples. This phase essentially performs one iteration of CEM (Mannor et al., 2003). Then, in the second phase, we replace the probability expression with $p(\tau \mid \pi, \mathbf{x}_0) \approx \frac{\sum_{\ell=1}^{\rho} p^\ell \cdot e^{-d(\tau, \tau^\ell)}}{\sum_{\ell=1}^{\rho} p^\ell}$ , which we optimize using gradient-based methods. Here, $d(\tau, \tau^\ell)$ is a distance metric between two loop-free policies, defined as the $L_2$ distance between the parameters of $\tau$ and $\tau^\ell$ . We chose $\rho = 10$ samples; for our benchmarks, we did not notice any improvement in the number of student-teacher iterations from increasing $\rho$ above 10, so we believe we are not losing much information through this approximation.
194
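+ The resulting surrogate objective for the second phase can be sketched as follows (with `R` the reward, `d` the $L_2$ parameter distance, and `samples` the retained top-$\rho$ pairs $(p^\ell, \tau^\ell)$ from the first phase):
+
+ ```python
+ import numpy as np
+
+ def teacher_objective(tau, samples, lam, R, d):
+     # lambda * R(tau) + log p(tau | pi, x0), with the student term
+     # approximated by the samples retained from the first phase.
+     num = sum(p * np.exp(-d(tau, tau_l)) for p, tau_l in samples)
+     den = sum(p for p, _ in samples)
+     return lam * R(tau) + np.log(num / den)
+ ```
+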
+
195
+ # 4.3 STUDENT: LEARNING STRUCTURED STATE MACHINE POLICIES VIA IMITATION
196
+
197
+ Next, we describe how the student solves the maximum likelihood problem Eq (9) to compute $\pi^{*}$ .
198
+
199
+ Probabilistic state machines. Although the output of our algorithm is a student policy that is a deterministic state machine, our algorithm internally relies on distributions over rollouts induced by the student policy to guide the teacher. Thus, we represent the student policy as a probabilistic state machine during learning. To do so, we simply make the action functions $H_{m_j}$ and switching conditions $G_{m_{j_1}}^{m_{j_2}}$ probabilistic—instead of constant parameters in the grammar for action functions and switching conditions, we now have Gaussian distributions $\mathcal{N}(\alpha ,\sigma)$ . Then, when executing $\pi$ , we obtain i.i.d. samples of the parameters $H'_{m_j}\sim H_{m_j}$ and $\{(G_{m_j}^{m_j'})'\sim G_{m_j}^{m_j'}\}_{m_j'}$ every time we switch to mode $m_j$ , and act according to $H'_{m_j}$ and $\{(G_{m_j}^{m_j'})'\}$ until the mode switches again. By re-sampling these parameters on every mode switch, we avoid dependencies across different parts of a rollout or different rollouts. On the other hand, by not re-sampling these parameters within a mode switch, we ensure that the structure of $\pi$ remains intact within a mode.
200
+
201
+ **Optimization.** Each $\tau_{k}$ can be decomposed into segments $(k,i)$ where action function $H_{k,i}$ is executed for duration $T_{k,i}$ . For example, each block in Figure 3(a) is a segment. Furthermore, for the student $\pi$ , let $H_{m_j}$ be the action function distribution for mode $m_j$ and $G_{m_{j_1}}^{m_{j_2}}$ be the switching condition distribution from mode $m_{j_1}$ to mode $m_{j_2}$ .
202
+
203
+ ![](images/4c8b287df36309ae77f7dd91201eeaa06cb7e130772933a15e46956a30001e19.jpg)
204
+ Figure 4: Comparison of performances on train (left) and test (middle) distributions. Our approach outperforms the baselines on all benchmarks in terms of test performance. An empty bar indicates that the policy learned for that experiment failed on all runs. We also plot test performance for different choices of training distribution for the Car benchmark (right).
205
+
206
+ ![](images/8efacb854ad0d14fa7fa8b7e74d9675f1f5b57f17919ec1107e2f8c8fac367ef.jpg)
207
+
208
+ ![](images/269e3c58c841a1648321b0bd6fb1a4745aabe66cc641f9390658343c74a3d251.jpg)
209
+
210
+ condition distribution for mode $m_{j_1}$ to mode $m_{j_2}$ . Note that $H_{m_j}$ and $G_{m_{j_1}}^{m_{j_2}}$ are distributions whereas $H_{k,i}$ and $T_{k,i}$ are constants. We have
211
+
212
+ $$
213
+ p(\tau_k \mid \pi, \mathbf{x}_0^k) = \prod_i p(H_{k,i} \mid \pi, \mathbf{x}_0^k) \cdot p(T_{k,i} \mid \pi, \mathbf{x}_0^k).
214
+ $$
215
+
216
+ For each $(k,i)$ , let $\mu_{k,i}$ be the latent random variable indicating the $i$ th mode used by $\pi$ starting from $\mathbf{x}_0^k$ ; in particular, $\mu_{k,i}$ is a categorical random variable that takes values in the modes $\{m_j\}$ . Here, $\mu_{k,i} = m_j$ means that $H_{k,i}$ is sampled from the distribution $H_{m_j}$ and $T_{k,i}$ is determined by the sampled switching conditions from the distributions $\{G_{m_j}^{m_{j'}}\}$ . Introducing the latent variables $\mu_{k,i}$ allows the student to compute $\pi^*$ by computing $H_{m_j}^*$ and $G_{m_{j_1}}^{m_{j_2}*}$ separately. In Figure 3, (b) and (e) show the learned mode mappings $p(\mu = m_j)$ for the segments in the loop-free policies shown in (a) and (d), respectively.
217
+
218
+ Since directly optimizing the maximum likelihood $\pi$ is hard in the presence of the latent variables $\mu_{k,i}$ , we use the standard expectation maximization (EM) approach to optimizing $\pi$ , where the E-step computes the distributions $p(\mu_{k,i} = m_j)$ assuming $\pi$ is fixed, and the M-step optimizes $\pi$ assuming the probabilities $p(\mu_{k,i} = m_j)$ are fixed. See Appendix A for details. In Figure 3, (c) and (f) show the most probable rollouts from the state-machine policies learned at the end of the EM approach for two different student iterations.
219
+
220
+ # 5 EXPERIMENTS
221
+
222
+ **Benchmarks.** We use 6 control problems, each with different training and test distributions (summarized in Figure 8 in Appendix C): (i) Car, the benchmark in Figure 1, (ii) Quad, where the goal is to maneuver a 2D quadcopter through an obstacle course by controlling its vertical acceleration, and we vary the obstacle course length (see Figure 6, leftmost), (iii) QuadPO, a variant where the obstacles are unobserved but periodic (so the agent can perform well using a repeating motion), see Figure 6 (second from left), (iv) Pendulum, where we vary the pendulum mass, (v) Cart-Pole, where we vary the time horizon and pole length, and (vi) Swimmer, where the goal is to move the swimmer forward through a viscous liquid, and we vary the length of the segments comprising the robot swimmer.
223
+
224
+ **Baselines.** We compare against: (i) RL: PPO with a feed-forward neural network policy, (ii) RL-LSTM: PPO with an LSTM, (iii) Direct-Opt: learning a state machine policy directly via numerical optimization. Hyper-parameters are chosen to maximize performance on the training distribution. More details about the baselines and the hyper-parameters can be found in Appendices B.2, B.3, & B.4. Each algorithm is trained 5 times; we choose the one that performs best on the training distribution.
225
+
226
+ Note that for the comparison to RL approaches, we use model-free algorithms, whereas, in our algorithm, the teacher uses model-based optimization. We do not compare against model-based RL approaches because (a) even model-free RL approaches achieve almost perfect performance on the training distribution (see Figure 4 left) and (b) our main goal is to compare the performance of our policies and the neural network policies on the test distribution, not the training distribution. Moreover, if the model of the system is unknown, we can use existing algorithms to infer the model from data (Ahmadi et al., 2018) and then use this learned model in our algorithm.
227
+
228
+ ![](images/54177baeb52bbb142af2201fc47f567f708161dccd5285e16775d33947c62f01.jpg)
229
+ (a) RL Train
230
+
231
+ ![](images/6320791857c8d80f0bfa67b536cf9abe61ae6a9b775ba750c22fc35f5f29aadb.jpg)
232
+ (b) RL Test
233
+
234
+ ![](images/dca2c4d2217b6c88efb9663dd5ce21f2d6b1f620c6c12bdeefc9b5dddee3779b.jpg)
235
+ (c) Original
236
+
237
+ ![](images/5ef9e465b6c18d7d13182980098671af0c9f81b04c82fcc8399439c4f8df5915.jpg)
238
+ (d) User change 1
239
+
240
+ ![](images/467668a1c55ccae6f94a7a8f60884ae17eecb391fb9ba12ce81eb607b75f65de.jpg)
241
+ (e) User change 2
242
+
243
+ ![](images/20160998df38d376ef0a1c59c24368e03b7ebdf934d080d00131f870f673f874.jpg)
244
+ Figure 5: (a-c) The RL policy generates unstructured trajectories, and therefore does not generalize from (a) the training distribution to (b) the test distribution. In contrast, our state machine policy in (c) generates a highly structured trajectory that generalizes well. (c-e) A user can modify our state machine policy to improve performance. In (d), the user sets the steering angle to the maximum value 0.5, and in (e), the user sets the thresholds in the switching conditions $G_{m_1}^{m_2}$ , $G_{m_2}^{m_1}$ to 0.1.
245
+
246
+ ![](images/06a3683c39000b4cdcf415b3d74d55ac618a3c248ccac5000d9c87262a4c7505.jpg)
247
+ Figure 6: Left: Trajectories for the Quad (leftmost) and QuadPO (second from the left) benchmarks using our state machine policy. Right: Graph of vertical acceleration over time for both our policy (red) and the neural network policy (blue), for Quad (second from the right) and QuadPO (rightmost).
248
+
249
+ ![](images/621e0e4362b695db60ba11736fe9f66b672a5c77e63c3742b0d337100b54982a.jpg)
250
+
251
+ ![](images/17c0cdf5dbcffa3bf1b29bc842440276d32ce2df4183af1048b95d0a6349ed93.jpg)
252
+
253
+ # 5.1 RESULTS
254
+
255
+ Figure 4 shows results on both training and test distributions. We measure performance as the fraction of rollouts (out of 1000) that both satisfy the safety specification and reach the goal.
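+
+ Concretely, this metric can be computed as in the sketch below, where `sample_rollout`, `satisfies_safety`, and `reaches_goal` are hypothetical helpers wrapping the environment and the specifications $\phi_S$ and $\phi_G$ .
+
+ ```python
+ import random
+
+ def success_rate(policy, initial_states, n=1000):
+     """Fraction of n rollouts that satisfy the safety spec AND reach the goal."""
+     wins = 0
+     for _ in range(n):
+         tau = sample_rollout(policy, random.choice(initial_states))  # hypothetical
+         wins += int(satisfies_safety(tau) and reaches_goal(tau))
+     return wins / n
+ ```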
256
+
257
+ **Inductive generalization.** For all benchmarks, our policy generalizes well on the test distribution. In four cases, we generalize perfectly (all runs satisfy the metric). For Quad and QuadPO, the policies result in collisions on some runs, but only towards the end of the obstacle course.
258
+
259
+ **Comparison to RL.** The RL policies mostly achieve good training performance, but generalize poorly since they over-specialize to states seen during training. The exceptions are Pendulum and Swimmer. Even in these cases, the RL policies take longer to reach the goals than our state machine policies (see Figure 10 and Figure 11 in Appendix C). For QuadPO, the RL policy does not achieve good training performance since the states are partially observed. One might expect the LSTM policies to alleviate this issue. However, the LSTM policies often perform poorly even on the training distribution, and also generalize worse than the feed-forward neural network policies.
260
+
261
+ **Comparison to direct-opt.** The state machine policies learned using direct-opt perform poorly even in training because of the numerous local optima arising from the structural constraints. This illustrates the need to use adaptive teaching to learn state machine policies.
262
+
263
+ # 5.2 QUALITATIVE ANALYSIS
264
+
265
+ **Behavior of policy.** We empirically analyze the policies. Figure 5 shows the trajectory taken by the RL policy (a), compared to our policy (c), from a training initial state for the Car benchmark. The RL policy does not exhibit a repeating behavior, which causes it to fail on the trajectory from a test state shown in (b). Similarly, Figure 6 (right) compares the actions taken by our policy to those taken by the RL policy on Quad and QuadPO. Our policy produces smooth, repeating actions, whereas the RL policy does not. Action-versus-time graphs for the other benchmarks can be found in the appendix (Figures 12, 13, & 14), and they all show similar behaviors.
266
+
267
+ **Varying the training distribution.** We study how the test performance changes as we vary the training distribution on the Car benchmark. We vary $X_0^{\mathrm{train}}$ as $d \sim [d_{\mathrm{min}}, 13]$ , where $d_{\mathrm{min}} \in \{13, 12.5, 12, 11.5, 11.2, 11\}$ , but fix $X_0^{\mathrm{test}}$ to $d \sim [11, 12]$ . Figure 4 (right) shows how test performance varies with $d_{\mathrm{min}}$ for both our policy and the RL policy. Our policy inductively generalizes for a wide range of training distributions. In contrast, the test performance of the RL policy initially increases as the training distribution grows, but it eventually starts declining, because its training performance itself starts to decline. Thus, in some settings, our approach (even when trained on smaller distributions) can produce policies that outperform the neural network policies produced by RL (even when trained on the full distribution).
268
+
269
+ **Interpretability.** An added benefit of our state machine policies is interpretability. In particular, we demonstrate the interpretability of our policies by showing how a user can modify a learned state machine policy. Consider the policy from Figure 1e for the autonomous car. We manually make the following changes: (i) increase the steering angle in $H_{m_1}$ to its maximum value 0.5, and (ii) decrease the gap maintained between the agent and the black cars by changing the switching condition $G_{m_1}^{m_2}$ to $d_f \leq 0.1$ and $G_{m_2}^{m_1}$ to $d_b \leq 0.1$ . Figure 5 demonstrates these changes—it shows trajectories obtained using the original policy (c), the first modified policy (d), and the second modified policy (e). There is no straightforward way to make these kinds of changes to a neural network policy.
270
+
271
+ # 6 CONCLUSION
272
+
273
+ We have proposed an algorithm for learning state machine policies that inductively generalize to novel environments. Our approach is based on a framework called adaptive teaching, which alternately learns a student that imitates a teacher, who in turn adapts to the structure of the student. We demonstrate that our policies inductively generalize better than RL policies.
274
+
275
+ In the future, we will explore more complex grammars for the action functions and the switching conditions, for example, with some parts being small neural networks, while still retaining the ability to learn generalizable behaviors. Moreover, we will extend our approach to use model-free techniques in the teacher's algorithm, bringing it closer to the standard reinforcement learning setting. Finally, we believe that the idea of learning programmatic representations, together with the adaptive teaching algorithm for handling mixed discrete-continuous problems, can be applied to other learning settings such as supervised and unsupervised learning.
276
+
277
+ # ACKNOWLEDGMENTS
278
+
279
+ This work was supported by ONR N00014-17-1-2699 and NSF Award CCF-1910769.
280
+
281
+ # REFERENCES
282
+
283
+ Douglas Aberdeen and Jonathan Baxter. Scaling internal-state policy-gradient methods for POMDPs. In ICML, pp. 3-10, 2002.
284
+ Mohamadreza Ahmadi, Ufuk Topcu, and Clarence Rowley. Control-oriented learning of Lagrangian and Hamiltonian systems. In 2018 Annual American Control Conference (ACC), pp. 520-525. IEEE, 2018.
285
+ Zeyuan Allen-Zhu, Yuanzhi Li, and Zhao Song. A convergence theory for deep learning via overparameterization. In Kamalika Chaudhuri and Ruslan Salakhutdinov (eds.), Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine Learning Research, pp. 242-252, Long Beach, California, USA, 09-15 Jun 2019. PMLR. URL http://proceedings.mlr.press/v97/allen-zhu19a.html.
286
+ David Andre and Stuart J Russell. State abstraction for programmable reinforcement learning agents. In AAAI/IAAI, pp. 119-125, 2002.
287
+ Osbert Bastani, Yewen Pu, and Armando Solar-Lezama. Verifiable reinforcement learning via policy extraction. In Advances in Neural Information Processing Systems, pp. 2494-2504, 2018.
288
+ Pavol Bielik, Veselin Raychev, and Martin Vechev. Program synthesis for character level language modeling. In ICLR, 2017.
289
+
290
+ Jonathon Cai, Richard Shin, and Dawn Song. Making neural programming architectures generalize via recursion. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings, 2017. URL https://openreview.net/forum?id=BkbY4psgg.
291
+ Luca Carlone and Giuseppe C. Calafiore. Convex relaxations for pose graph optimization with outliers. IEEE Robotics and Automation Letters, 3(2):1160-1167, 2018. doi: 10.1109/LRA.2018.2793352. URL https://doi.org/10.1109/LRA.2018.2793352.
292
+ Prafulla Dhariwal, Christopher Hesse, Oleg Klimov, Alex Nichol, Matthias Plappert, Alec Radford, John Schulman, Szymon Sidor, Yuhuai Wu, and Peter Zhokhov. Openai baselines. https://github.com/openai/baselines, 2017.
293
+ Kevin Ellis, Armando Solar-Lezama, and Josh Tenenbaum. Unsupervised learning by program synthesis. In Advances in neural information processing systems, pp. 973-981, 2015.
294
+ Kevin Ellis, Daniel Ritchie, Armando Solar-Lezama, and Josh Tenenbaum. Learning to infer graphics programs from hand-drawn images. In Advances in Neural Information Processing Systems, pp. 6060-6069, 2018.
295
+ Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pp. 1126-1135. JMLR.org, 2017.
296
+ Eric A Hansen. Solving POMDPs by searching in policy space. In Proceedings of the Fourteenth conference on Uncertainty in artificial intelligence, pp. 211-219. Morgan Kaufmann Publishers Inc., 1998.
297
+ Brenden M Lake, Ruslan Salakhutdinov, and Joshua B Tenenbaum. Human-level concept learning through probabilistic program induction. Science, 350(6266):1332-1338, 2015.
298
+ Sergey Levine and Vladlen Koltun. Guided policy search. In International Conference on Machine Learning, pp. 1-9, 2013.
299
+ Shie Mannor, Reuven Y Rubinstein, and Yohai Gat. The cross entropy method for fast policy search. In Proceedings of the 20th International Conference on Machine Learning (ICML-03), pp. 512-519, 2003.
300
+ Nicolas Meuleau, Leonid Peshkin, Kee-Eung Kim, and Leslie Pack Kaelbling. Learning finite-state controllers for partially observable environments. In Proceedings of the Fifteenth conference on Uncertainty in artificial intelligence, pp. 427-436. Morgan Kaufmann Publishers Inc., 1999.
301
+ Charles Packer, Katelyn Gao, Jernej Kos, Philipp Krahenbuhl, Vladlen Koltun, and Dawn Song. Assessing generalization in deep reinforcement learning. arXiv preprint arXiv:1810.12282, 2018.
302
+ Ronald Parr and Stuart J Russell. Reinforcement learning with hierarchies of machines. In Advances in neural information processing systems, pp. 1043-1049, 1998.
303
+ Leonid Peshkin, Nicolas Meuleau, and Leslie Kaelbling. Learning policies with external memory. arXiv preprint cs/0103003, 2001.
304
+ Pascal Poupart and Craig Boutilier. Bounded finite state controllers. In Advances in neural information processing systems, pp. 823-830, 2004.
305
+ Aravind Rajeswaran, Kendall Lowrey, Emanuel V Todorov, and Sham M Kakade. Towards generalization and simplicity in continuous control. In Advances in Neural Information Processing Systems, pp. 6550-6561, 2017.
306
+ Stéphane Ross, Geoffrey Gordon, and Drew Bagnell. A reduction of imitation learning and structured prediction to no-regret online learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pp. 627-635, 2011.
307
+ Stefan Schaal. Is imitation learning the route to humanoid robots? Trends in cognitive sciences, 3(6): 233-242, 1999.
308
+
309
+ Lazar Valkov, Dipak Chaudhari, Akash Srivastava, Charles Sutton, and Swarat Chaudhuri. Houdini: Lifelong learning as program synthesis. In Advances in Neural Information Processing Systems, pp. 8701-8712, 2018.
310
+ Abhinav Verma, Vijayaraghavan Murali, Rishabh Singh, Pushmeet Kohli, and Swarat Chaudhuri. Programmatically interpretable reinforcement learning. arXiv preprint arXiv:1804.02477, 2018.
311
+ Abhinav Verma, Hoang Minh Le, Yisong Yue, and Swarat Chaudhuri. Imitation-projected policy gradient for programmatic reinforcement learning. CoRR, abs/1907.05431, 2019. URL http://arxiv.org/abs/1907.05431.
312
+ Martin J Wainwright, Michael I Jordan, et al. Graphical models, exponential families, and variational inference. Foundations and Trends in Machine Learning, 1(1-2):1-305, 2008.
313
+ Halley Young, Osbert Bastani, and Mayur Naik. Learning neurosymbolic generative models via program synthesis. In International Conference on Machine Learning, pp. 7144-7153, 2019.
314
+ He Zhu, Zikang Xiong, Stephen Magill, and Suresh Jagannathan. An inductive synthesis framework for verifiable reinforcement learning. In Proceedings of the 40th ACM SIGPLAN Conference on Programming Language Design and Implementation, pp. 686-701. ACM, 2019.
315
+
316
+ # A EXPECTATION MAXIMIZATION FOR STUDENT OPTIMIZATION
317
+
318
+ # A.1 COMPUTING $p(\tau \mid \pi, \mathbf{x}_0)$
319
+
320
+ First, note that we have
321
+
322
+ $$
323
+ p(\tau_k \mid \pi, \mathbf{x}_0^k) = \prod_i p(H_{k,i} \mid \pi, \mathbf{x}_0^k) \cdot p(T_{k,i} \mid \pi, \mathbf{x}_0^k).
324
+ $$
325
+
326
+ where
327
+
328
+ $$
329
+ p(H_{k,i} \mid \pi, \mathbf{x}_0^k) = \sum_j p(H_{k,i} \mid H_{m_j}) \cdot p(\mu_{k,i} = m_j).
330
+ $$
331
+
332
+ Similarly, the duration $T_{k,i}$ is determined both by the current mode $\mu_{k,i} = m_{j_1}$ , and by the switching conditions $G_{m_{j_1}}^- = \{G_{m_{j_1}}^{m_{j_2}}\}_{m_{j_2}}$ from the current mode $m_{j_1}$ into some other mode $m_{j_2}$ . More precisely, let $\gamma_{k,i}$ denote the trajectory (sequence of states) of segment $(k,i)$ of $\tau_k$ , and let $\zeta(\gamma_{k,i}, G)$ denote the earliest time at which a switching condition $G$ becomes true along $\gamma_{k,i}$ (for a set of conditions such as $G_{m_{j_1}}^-$ , $\zeta(\gamma_{k,i}, G_{m_{j_1}}^-)$ denotes the earliest time at which any of them becomes true). Since the switching conditions are distributions, $\zeta(\gamma_{k,i}, G)$ is a distribution over transition times. Then, we have
333
+
334
+ $$
335
+ p(T_{k,i} \mid \pi, \mathbf{x}_0^k) = \sum_{m_{j_1}} \sum_{m_{j_2}} p(\mu_{k,i} = m_{j_1}) \cdot p(\mu_{k,i+1} = m_{j_2}) \cdot p(T_{k,i} \mid G_{m_{j_1}}^{m_{j_2}}, G_{m_{j_1}}^-)
336
+ $$
337
+
338
+ $$
339
+ p(T_{k,i} \mid G_{m_{j_1}}^{m_{j_2}}, G_{m_{j_1}}^-) = p(T_{k,i} = \zeta(\gamma_{k,i}, G_{m_{j_1}}^{m_{j_2}})) \cdot \prod_{m_{j_3} \neq m_{j_2}} p(T_{k,i} < \zeta(\gamma_{k,i}, G_{m_{j_1}}^{m_{j_3}})).
340
+ $$
341
+
342
+ In other words, $T_{k,i}$ is the duration until $G_{m_{j_1}}^{m_{j_2}}$ triggers, conditioned on none of the conditions $G_{m_{j_1}}^{m_{j_3}}$ triggering (where $m_{j_3} \neq m_{j_2}$ ).
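+
+ Because the switching-condition parameters are Gaussian, $\zeta(\gamma_{k,i}, G)$ generally has no closed form; one simple way to evaluate these duration probabilities is Monte Carlo over sampled condition parameters, as in the sketch below. Here `first_trigger_time` is a hypothetical helper that scans $\gamma_{k,i}$ for the first timestep at which a sampled condition holds, and each condition is assumed to expose a `sample()` draw of its Gaussian parameters.
+
+ ```python
+ import numpy as np
+
+ def duration_log_prob(gamma, T, G_target, G_others, n=1000):
+     """Monte Carlo estimate of log p(T | G_target, G_-): the target condition
+     fires exactly at duration T while all other conditions stay silent past T."""
+     hits = 0
+     for _ in range(n):
+         t_star = first_trigger_time(gamma, G_target.sample())   # draw from zeta(gamma, G)
+         silent = all(first_trigger_time(gamma, G.sample()) > T for G in G_others)
+         hits += int(t_star == T and silent)
+     return float(np.log(max(hits, 1) / n))   # crude smoothing to avoid log(0)
+ ```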
343
+
344
+ # A.2 OPTIMIZING THE STUDENT POLICY
345
+
346
+ Numerically optimizing the maximum likelihood objective to compute $\pi^{*}$ is hard because it requires integrating over all possible choices of the latent variables $\mu_{k,i}$ . For example, if the teacher generates 10 loop-free policies every iteration, each with 10 segments, and there are 4 modes in the state machine, then the number of choices for the latent variables is $4^{100}$ , which makes enumeration infeasible. The expectation-maximization method provides an efficient way to compute the maximum-likelihood solution by alternately optimizing the latent variables and the state-machine parameters. The E-step computes the probability distributions $p(\mu_{k,i} = m_j)$ for a fixed $\pi$ , and the M-step optimizes $H_{m_j}$ and $G_{m_{j_1}}^{m_{j_2}}$ given $p(\mu_{k,i} = m_j)$ .
347
+
348
+ **E-step.** Assuming $\pi$ is fixed, we have
349
+
350
+ $$
351
+ p(\mu_{k,i} = m_j \mid \pi, \{\tau_k\}) = \frac{p(H_{k,i} \mid H_{m_j}) \cdot p(T_{k,i} = \zeta(\gamma_{k,i}, G_{m_j}^-))}{\sum_{m_{j'}} p(H_{k,i} \mid H_{m_{j'}}) \cdot p(T_{k,i} = \zeta(\gamma_{k,i}, G_{m_{j'}}^-))}. \tag{10}
352
+ $$
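+
+ As code, the E-step is a single normalization over modes. In the sketch below, the two likelihood factors are assumed to be precomputed as arrays with one row per segment $(k,i)$ and one column per mode $m_j$ .
+
+ ```python
+ import numpy as np
+
+ def e_step(h_lik, t_lik):
+     """h_lik[s, j] = p(H_{k,i} | H_{m_j}); t_lik[s, j] = p(T_{k,i} = zeta(gamma_{k,i}, G_{m_j}^-)).
+     Returns the responsibilities p(mu_{k,i} = m_j | pi, {tau_k}) per Eq (10)."""
+     joint = h_lik * t_lik                   # numerator of Eq (10), per segment and mode
+     return joint / joint.sum(axis=1, keepdims=True)
+ ```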
353
+
354
+ **M-step.** Assuming $p(\mu_{k,i} = m_j)$ is fixed, we solve
355
+
356
+ $$
357
+ \arg\max_{\{H_{m_j}\}} \sum_{k,i} p(\mu_{k,i} = m_j) \cdot \log p(H_{k,i} \mid H_{m_j}) \tag{11}
358
+ $$
359
+
360
+ $$
361
+ \begin{aligned} \arg\max_{\{G_{m_{j_1}}^{m_{j_2}}\}} \sum_{k,i} \, & p(\mu_{k,i} = m_{j_1}) \cdot p(\mu_{k,i+1} = m_{j_2}) \cdot \log p(T_{k,i} = \zeta(\gamma_{k,i}, G_{m_{j_1}}^{m_{j_2}})) \\ & + p(\mu_{k,i} = m_{j_1}) \cdot (1 - p(\mu_{k,i+1} = m_{j_2})) \cdot \log p(T_{k,i} < \zeta(\gamma_{k,i}, G_{m_{j_1}}^{m_{j_2}})) \end{aligned} \tag{12}
362
+ $$
363
+
364
+ For $G_{m_{j_1}}^{m_{j_2}}$ , the first term handles the case $\mu_{k,i+1} = m_{j_2}$ , where we maximize the probability that $G_{m_{j_1}}^{m_{j_2}}$ makes the transition at duration $T_{k,i}$ , and the second term handles the case $\mu_{k,i+1} \neq m_{j_2}$ , where we maximize the probability that $G_{m_{j_1}}^{m_{j_2}}$ does not make the transition until after duration $T_{k,i}$ .
365
+
366
+ We briefly discuss how to solve these equations. For action functions, suppose that $H$ encodes the distribution $\mathcal{N}(\alpha_H,\sigma_H^2)$ over action function parameters. Then, we have
367
+
368
+ $$
369
+ \begin{aligned} \alpha_{H_{m_j}}^* &= \frac{\sum_{k,i} p(\mu_{k,i} = m_j) \cdot \alpha_{H_{k,i}}}{\sum_{k,i} p(\mu_{k,i} = m_j)} \\ \left(\sigma_{H_{m_j}}^*\right)^2 &= \frac{\sum_{k,i} p(\mu_{k,i} = m_j) \cdot (\alpha_{H_{k,i}} - \alpha_{H_{m_j}}^*)(\alpha_{H_{k,i}} - \alpha_{H_{m_j}}^*)^T}{\sum_{k,i} p(\mu_{k,i} = m_j)} \end{aligned}
370
+ $$
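+
+ These updates are just the responsibility-weighted mean and covariance; a minimal sketch:
+
+ ```python
+ import numpy as np
+
+ def m_step_action(alphas, resp):
+     """alphas: parameters alpha_{H_{k,i}} of all segments, shape (S, d).
+     resp: responsibilities p(mu_{k,i} = m_j) for a fixed mode m_j, shape (S,).
+     Returns the weighted mean and covariance from the equations above."""
+     w = resp / resp.sum()
+     mean = (w[:, None] * alphas).sum(axis=0)
+     diff = alphas - mean
+     cov = np.einsum('s,si,sj->ij', w, diff, diff)
+     return mean, cov
+ ```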
371
+
372
+ Solving for the parameters of $G_{m_{j_1}}^{m_{j_2}}$ is more challenging, since the grammar admits multiple kinds of switching-condition expressions, which correspond to discrete parameters, and we need to optimize over these discrete choices. To do so, we perform a greedy search over them (see Section A.3 for details on the greedy strategy). For each choice considered during the greedy search, we encode Eq (12) as a numerical optimization problem and solve it to compute the corresponding means $\alpha_{G_{m_{j_1}}^{m_{j_2}}}^*$ and standard deviations $\sigma_{G_{m_{j_1}}^{m_{j_2}}}^*$ . Finally, we pick the discrete choice that achieves the best objective value according to Eq (12).
373
+
374
+ Computing the optimal parameters for switching conditions is more expensive than doing so for action functions; thus, on each student iteration, we iteratively solve Eq (10) and Eq (11) multiple times, but only solve Eq (12) once.
375
+
376
+ The EM method does not guarantee a global optimum but usually works well in practice. In addition, since computing the switching conditions is expensive, we restrict the number of EM iterations. However, even if the EM algorithm has not converged, our overall algorithm can still recover through additional teacher-student interactions.
377
+
378
+ The alternative would be to run the EM algorithm multiple times or for longer to get better results per student iteration, and possibly reduce the total number of teacher-student iterations. We say "possibly" because the EM algorithm might already have converged to the global optimum, making the extra EM iterations useless. The trade-off between our approach and this alternative depends on whether the teacher's algorithm or the student's algorithm is more expensive for a particular benchmark.
379
+
380
+ However, from Figure 15, we can see that some of our benchmarks already use very few $(< 5)$ teacher-student iterations (Car, QuadPO, Pendulum, Mountain car, and Swimmer). Of the other three benchmarks that needed many iterations, for two of them (Cartpole and Acrobot), the student's algorithm is as expensive as the teacher's algorithm. This justifies our decision not to run the EM algorithm multiple times or for longer.
381
+
382
+ # A.3 SYNTHESIZING SWITCHING CONDITIONS
383
+
384
+ Next, we describe how we search over the large number of discrete choices in the grammar for switching conditions. It is not hard to show that in Eq (12), the objectives for the switching condition parameters $G_{m_{j_1}}^{m_{j_2}}$ corresponding to different transitions $(m_{j_1}, m_{j_2})$ decompose into separate problems. Therefore, we can perform the search for each transition $(m_{j_1}, m_{j_2})$ separately. For each transition, the naive approach would be to search over the possible derivations in the context-free grammar for switching conditions to some bounded depth. However, this search space is exponential in the depth due to the productions $B := B \land B$ and $B := B \lor B$ . Thus, we employ a greedy search strategy to avoid the exponential blowup.
385
+
386
+ Intuitively, our search strategy is to represent switching conditions as a kind of decision tree, and then run a greedy algorithm to search over decision trees. Our search strategy is similar to (but simpler than) the one in Bielik et al. (2017). In particular, we can equivalently represent a switching condition as a decision tree, where the internal nodes have the form $\mathbf{o}[i] \leq \alpha$ or $\mathbf{o}[i] \geq \alpha$ (where $i \in \{1, \dots, d_O\}$ and $\alpha \in \mathbb{R}$ are parameters), and the leaf nodes are labeled with "Switch" or "Don't switch"—e.g., Figure 7 shows two examples of switching conditions expressed as decision trees. Then, our algorithm initializes the switching condition to a single leaf node—i.e., $G_{\mathrm{cur}} \gets \text{"Switch"}$ . At each step, we consider switching conditions $G \in \mathrm{next}(G_{\mathrm{cur}})$ that expand a single leaf node of $G_{\mathrm{cur}}$ ; among these, we choose $G_{\mathrm{cur}}$ to be the one that minimizes a loss $\mathrm{cost}(G)$ .
387
+
388
+ ![](images/14775c87042ae92cb8be4bbd5c8a933ffeeae23e6978481691a2a315a2dd3d8e.jpg)
389
+
390
+ ![](images/3ffb472de0de8cd38b91154551df62167e8999f9d052ef36b299f055766d0afc.jpg)
391
+ Figure 7: Switching conditions represented as decision trees.
392
+
393
+ Algorithm 1 Greedy algorithm for learning switching conditions.
394
+ ```txt
395
+ procedure LEARNSWITCHINGCONDITION
+     G_cur <- "Switch"
+     while |G_cur| < N do
+         G_cur <- argmin_{G in next(G_cur)} cost(G)
+     return G_cur
396
+ ```
397
+
398
+ More precisely, to construct next $(G_{\mathrm{cur}})$ , we iterate over all leaf nodes $L \in \mathrm{leaves}(G_{\mathrm{cur}})$ , and all expressions $E \in \mathcal{E}$ , where
399
+
400
+ $$
401
+ \mathcal{E} = \left\{ \text{if } \mathbf{o}[i] \sim \alpha \text{ then ``Switch'' else ``Don't Switch''} \;\middle|\; i \in \{1, \dots, d_O\},\; \alpha \in \mathbb{R},\; \sim \, \in \{\geq, \leq\} \right\}
402
+ $$
403
+
404
+ Here, $\sim \in \{\geq ,\leq \}$ is a inequality relation, $i\in \{1,\dots,d_O\}$ is a component of $\mathbf{o}$ , and $\alpha \in \mathbb{R}$ is a threshold. For each pair $L$ and $E$ , we consider the decision tree $G$ obtained by replacing $L$ with $E$ in $G_{\mathrm{cur}}$ . The set next $(G_{\mathrm{cur}})$ contains all $G$ constructed in this way.
405
+
406
+ Next, the loss function $\mathrm{cost}(G)$ is given by Eq (12). In each iteration, our algorithm minimizes $\mathrm{cost}(G)$ over $G \in \mathrm{next}(G_{\mathrm{cur}})$ and updates $G_{\mathrm{cur}} \gets G$ . To solve this optimization problem, we enumerate the possible choices of $\sim$ and $i$ , and use numerical optimization to compute $\alpha$ (since $\alpha$ is a continuous parameter). An example of a single iteration of our algorithm is shown in Figure 7: letting $G$ be the tree on the left and $G'$ be the tree on the right, the left-most leaf node of $G$ is expanded to obtain $G'$ .
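+
+ A sketch of one greedy expansion step under these definitions is shown below; `leaves`, `replace_leaf`, and the grid search over $\alpha$ are hypothetical simplifications (as described above, the actual implementation computes $\alpha$ via numerical optimization).
+
+ ```python
+ import itertools
+ import numpy as np
+
+ def greedy_step(G_cur, d_O, cost):
+     """Try every (leaf, observation index, relation, threshold) expansion and
+     return the expanded tree that minimizes cost(G)."""
+     best, best_cost = G_cur, cost(G_cur)
+     for leaf, i, rel in itertools.product(leaves(G_cur), range(d_O), ('<=', '>=')):
+         for alpha in np.linspace(-1.0, 1.0, 101):        # assumes normalized observations
+             G = replace_leaf(G_cur, leaf, (i, rel, alpha))  # hypothetical helper
+             c = cost(G)
+             if c < best_cost:
+                 best, best_cost = G, c
+     return best
+ ```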
407
+
408
+ Our algorithm is summarized in Algorithm 1. Overall, our algorithm searches over $N \cdot (N - 1) \cdot d_{O}$ different discrete structures, where $N$ is the number of nodes in the decision tree and $d_{O}$ is the length of the observation vector $\mathbf{o}$ .
409
+
410
+ # B IMPLEMENTATION DETAILS
411
+
412
+ # B.1 BENCHMARKS AND STATE-MACHINES STATISTICS
413
+
414
+ Figure 8 shows statistics for the benchmarks, such as the number of action and observation variables and the sets of initial states used for training and testing. Figure 8 also shows the different aspects of the grammar used to describe the space of possible state-machine policies. We are able to learn policies for these benchmarks using 2 to 4 distinct modes in the state machine with either a constant or a proportional grammar for the action functions. We use a Boolean tree grammar of depth 1 or 2 for all the switching conditions.
415
+
416
+ For the Quad benchmark, the action variable is the acceleration of the quadcopter in the vertical direction. The observations include the position $x, y$ , the velocities $v_x, v_y$ , and four sensors $ox_l, ox_u, oy_l, oy_u$ that describe the obstacle course in the near neighborhood. The QuadPO benchmark has the same action space as the Quad benchmark, but can only observe $x, y, v_x$ , and $v_y$ . The synthesized state-machine policies for these benchmarks are shown in Figure 16 and Figure 17. The action functions used for these benchmarks choose the acceleration to be proportional to $v_y$ .
417
+
418
+ The goal for the Pendulum benchmark is to control the force (continuous) at the actuated link in order to invert the link. The observation space includes the full state, i.e., the angle $\theta$ and the angular velocity $\omega$ . Figure 18 shows the synthesized state-machine policy for the pendulum benchmark.
419
+
420
+ <table><tr><td>Bench</td><td>#A</td><td>#O</td><td>\( {X}_{0}^{\text{train }} \)</td><td>\( {X}_{0}^{\text{test }} \)</td><td># modes</td><td>A_G</td><td>C_G</td></tr><tr><td>Car</td><td>2</td><td>5</td><td>d ~ [12,13.5]m</td><td>d ~ [11,12]m</td><td>3</td><td>Constant</td><td>Boolean tree (depth 1)</td></tr><tr><td>Quad</td><td>1</td><td>8</td><td>x dist = 40m</td><td>x dist = 80m</td><td>2</td><td>Proportional</td><td>Boolean tree (depth 1)</td></tr><tr><td>QuadPO</td><td>1</td><td>4</td><td>x dist = 60m</td><td>x dist = 120m</td><td>2</td><td>Proportional</td><td>Boolean tree (depth 1)</td></tr><tr><td>Pendulum</td><td>1</td><td>2</td><td>mass ~ [1,1.5]kg</td><td>mass ~ [1.5,5]kg</td><td>2</td><td>Constant</td><td>Boolean tree (depth 2)</td></tr><tr><td>Cartpole</td><td>1</td><td>4</td><td>time = 5s, len = 0.5</td><td>time = 300s, len = 1.0</td><td>2</td><td>Constant</td><td>Boolean tree (depth 2)</td></tr><tr><td>Acrobot</td><td>1</td><td>4</td><td>masses = [0.2,0.5]</td><td>masses = [0.5,2]</td><td>2</td><td>Constant</td><td>Boolean tree (depth 2)</td></tr><tr><td>Mountain car</td><td>1</td><td>2</td><td>power = [5,15]e-4</td><td>power = [3,5]e-4</td><td>2</td><td>Constant</td><td>Boolean tree (depth 1)</td></tr><tr><td>Swimmer</td><td>3</td><td>10</td><td>len = 1 unit</td><td>len = 0.75 unit</td><td>4</td><td>Proportional</td><td>Boolean tree (depth 2)</td></tr></table>
421
+
422
+ Figure 8: Summary of our benchmarks. #A is the action dimension, #O is the observation dimension, $X_0^{\mathrm{train}}$ is the set of initial states used for training, $X_0^{\mathrm{test}}$ is the set of initial states used to test inductive generalization, # modes is the number of modes in the state machine policy, and A_G and C_G are the grammars for action functions and switching conditions, respectively. Depth of C_G indicates the number of levels in the Boolean tree.
423
+
424
+ The Cartpole benchmark consists of a pole attached to a cart. The goal is to keep the pole upright by applying a continuous force to move the cart to the right or to the left. The observations include the position $x$ , the velocity $v$ of the cart, the angle $\theta$ , and the angular velocity $\omega$ of the pole. The synthesized solution is shown in Figure 19.
425
+
426
+ The Acrobot benchmark is similar to the Pendulum benchmark but with two links; only the top link can be actuated, and the goal is to drive the bottom link above a certain height. The observations are the angles $\theta_{1},\theta_{2}$ and the angular velocities $\omega_{1},\omega_{2}$ of the two links. For this benchmark, we vary the mass of the links between the training and the test distributions. The synthesized solution is shown in Figure 20.
427
+
428
+ For the Mountain car benchmark, the goal is to drive a low-powered car to the top of a hill. The agent has to drive back and forth to gain enough momentum to cross the hill. The agent controls the force (continuous) to move the car to the right or left, and observes the position $x$ and the velocity $v$ at every timestep. We vary the power of the car between the training and the test distributions. The synthesized solution is shown in Figure 21.
429
+
430
+ The Swimmer benchmark is based on MuJoCo's swimmer. To make this benchmark more challenging, we use 4 segments instead of 3. Three actions control the torques at the joints, and the goal is to make the swimmer move forward through a viscous liquid. The agent observes the swimmer's global angle $\theta$ , the joint angles $(\theta_1, \theta_2, \theta_3)$ , the swimmer's global angular velocity $\omega$ , the angular velocities of the joints $(\omega_1, \omega_2, \omega_3)$ , and the velocity of the center of mass $(v_x, v_y)$ . We vary the length of the segments between the training and the test distributions. The actions are chosen to be proportional to their corresponding angles. The synthesized state machine policy is shown in Figure 22.
431
+
432
+ # B.2 HYPER-PARAMETERS
433
+
434
+ There are three main hyper-parameters in our algorithm:
435
+
436
+ - The maximum number of segments/modes in a loop-free policy. A large number of segments makes the teacher's numerical optimization slow, while a small number of segments might not be sufficient to get a high reward.
437
+ - The maximum time that a segment can be executed for in a loop-free policy. This maximum time constraint helps the numerical optimization to avoid local optima that arise from executing a particular (non-convex) action function for too long.
438
+ - The parameter $\lambda$ in Section 4.1. This parameter strikes a balance between preferring high-reward loop-free policies and preferring policies that are similar to the state machine learned so far.
439
+
440
+ The first two parameters solely affect the teacher's algorithm; thus, we choose them by randomly sampling from a set and selecting the values that produce high-reward loop-free policies. We use $\lambda = 100$ for all our experiments.
441
+
442
+ # B.3 THE DIRECT-OPT BASELINE
443
+
444
+ For this baseline, we convert the problem of synthesizing a state machine policy into a numerical optimization problem. To do this, we first encode the discreteness in the grammar for switching conditions into a continuous one-hot representation. For example, the set of expressions $\mathbf{o}[i] \leq \alpha_0$ or $\mathbf{o}[i] \geq \alpha_0$ is encoded as $\alpha_s (\alpha_1 \mathbf{o}[1] + \alpha_2 \mathbf{o}[2] + \dots + \alpha_n \mathbf{o}[n]) \leq \alpha_0$ with constraints $-1 \leq \alpha_s \leq 1$ , $0 \leq \alpha_i \leq 1$ for all $i \in \{1, \dots, n\}$ , and $\sum_{i=1}^{n} \alpha_i = 1$ . The choices between the leaf expressions, conjunctions, and disjunctions are also encoded in a one-hot fashion. We also tried an encoding without the extra constraints on $\alpha$ —i.e., where the switching conditions are arbitrary linear functions of the observations. We would expect the linear encoding to generalize worse than the one-hot encoding; however, we found it hard to even synthesize a policy that works well on the training set with either encoding.
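+
+ For illustration, the relaxed leaf condition can be written as a single differentiable predicate. In the sketch below, the simplex constraints on $\alpha_1, \dots, \alpha_n$ are realized via a softmax and the sign $\alpha_s$ via tanh, which is one assumed way to enforce them inside an unconstrained optimizer.
+
+ ```python
+ import numpy as np
+
+ def relaxed_condition(obs, theta_s, theta, alpha0):
+     """Continuous surrogate for the discrete choice 'o[i] <= alpha0' / 'o[i] >= alpha0'.
+     softmax(theta) plays the role of the one-hot weights, tanh(theta_s) of alpha_s."""
+     w = np.exp(theta - theta.max())
+     w = w / w.sum()                        # each weight in [0, 1], sums to 1
+     return np.tanh(theta_s) * float(w @ obs) <= alpha0
+ ```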
445
+
446
+ Another difficulty with direct optimization is that we need to optimize the combined reward from all the initial states at once. In contrast, the numerical optimization performed by the teacher in our approach can optimize the reward for each initial state separately. To deal with this issue, we use a batch optimization technique that uses 10 initial states per batch and seeds the starting point of the numerical optimization for each batch with the parameters found so far. It also restarts the process with a random starting point if the numerical optimization stalls. We carry out this process in parallel using 10 threads until either a solution is found or the time exceeds 2 hours.
447
+
448
+ # B.4 RL BASELINES
449
+
450
+ We use the PPO2 implementation from OpenAI Baselines (Dhariwal et al., 2017) with the standard MLP and LSTM networks for our RL baselines using $10^{7}$ timesteps for training.
451
+
452
+ **Environment featurization.** We used the same action spaces, observation spaces, and sets of initial states that we used for our approach. One exception is the Car benchmark, for which we appended the observation vector with observations from the previous timestep. This modification was essential for the RL baseline to achieve good performance on the training distribution.
453
+
454
+ **Designing reward functions.** While our approach takes in a safety specification $\phi_S(\mathbf{x}) \leq 0$ and a goal specification $\phi_G(\mathbf{x}) \leq 0$ , the RL baselines need a reward function. For the classic control problems such as cartpole, pendulum, acrobot, mountain car, and swimmer, we used the standard reward functions specified by their OpenAI environments. For the Quad and QuadPO benchmarks, since the goal is to avoid collisions for as long as possible, we use a reward of 1 for every timestep that the agent is alive, and the agent is terminated as soon as it collides with any of the obstacles. Designing the reward function for the Car benchmark was tricky, because this benchmark has both a goal and a safety specification, and finding the right balance between them is crucial for learning. We tried various forms of reward functions and found that the following version achieves the best performance on the training distribution (on the metric that measures the fraction of roll-outs that satisfy both the goal and the safety property):
455
+
456
+ $$
457
+ r(\mathbf{x}, \mathbf{a}) = -\phi_G(\mathbf{x})^+ + \begin{cases} -L & \text{if } \phi_S(\mathbf{x}) > 0 \\ 0 & \text{otherwise} \end{cases}
458
+ $$
459
+
460
+ which adds the numerical error for not satisfying the goal with a constant negative error $(-L)$ if the safety specification is violated at any time-step. We tried different values for $L \in \{0.1, 1, 2, 10, 20\}$ and found that $L = 10$ achieves the best performance on the training distribution.
461
+
462
+ Hyper-parameters search. We performed a search over the various hyper-parameters in the PPO2 algorithm. We ran 10 instances of the PPO2 algorithm with parameters uniformly sampled from the space given below, and chose the one that performs well on the training distribution. This sampling is not exhaustive, but our results in Figure 4 (left most) show that we did find parameters that achieve good training performances for most of our benchmarks.
463
+
464
+ - The number of training minibatches per update, nminibatches = {1,2,4,8,16,32,64,128,256,512,1024,2048}. For the LSTM network, we set this hyper-parameter to 1.
465
+
466
+ <table><tr><td colspan="2"></td><td colspan="2">Performance on Train dist.</td><td colspan="2">Performance on Test dist.</td></tr><tr><td>Bench</td><td>Algorithm</td><td>G</td><td>T_G</td><td>G</td><td>T_G</td></tr><tr><td rowspan="3">Acrobot</td><td>Ours</td><td>0.08</td><td>7.9s</td><td>0.02</td><td>31.8s</td></tr><tr><td>RL</td><td>0.16</td><td>6.5s</td><td>0.0</td><td>45.2s</td></tr><tr><td>Direct-opt</td><td>⊥</td><td>⊥</td><td>⊥</td><td>⊥</td></tr><tr><td rowspan="3">Mountain car</td><td>Ours</td><td>0.001</td><td>168.5s</td><td>0.008</td><td>290.1s</td></tr><tr><td>RL</td><td>0.0</td><td>98.7s</td><td>0.0</td><td>214.7s</td></tr><tr><td>Direct-opt</td><td>0.006</td><td>105.3s</td><td>2.18</td><td>216.0s</td></tr></table>
467
+
468
+ ![](images/371614b43961557c0a7c3af7ddd90a035ebe66db8f8673dc3864a169e72ce3ae.jpg)
469
+ Figure 9: Experiment results for additional benchmarks. G is the average goal error (closer to 0 is better). T_G is the average number of timesteps to reach the goal (lower is better). $\bot$ indicates timeout. We can see that both our approach and RL generalize for these benchmarks.
470
+
471
+ ![](images/ed320224d02af60c72f064a0b7e1675dad5055a75448eec3ec2554bbfeb370d2.jpg)
472
+ Figure 10: Trajectories taken by our state machine policy (left) and the RL policy (middle) on Pendulum for a test environment (i.e., heavier pendulum). Green (resp., red) indicates positive (resp., negative) torque. Our policy performs optimally by using positive torque when angular velocity $\geq 0$ and negative torque otherwise. In contrast, the RL policy performs sub-optimally (especially in the beginning of the trajectory).
473
+
474
+ - The policy entropy coefficient in the optimization objective, entcoef = {0.0, 0.01, 0.05, 0.1}.
475
+ - The number of training epochs per update, noptepochs $\in$ {3,..., 36}.
476
+ - The clipping range, cliprange = {0.1, 0.2, 0.3}.
477
+ - The learning rate, $\mathrm{lr} \in [5 \times 10^{-6}, 0.003]$ .
478
+
479
+ # C ADDITIONAL RESULTS
480
+
481
+ # C.1 ADDITIONAL PERFORMANCE RESULTS
482
+
483
+ Figure 9 shows the training and test performance for the acrobot and mountain car benchmarks. We can see that both our approach and RL generalize for these benchmarks.
484
+
485
+ Figure 10 qualitatively analyzes the policies learned by our approach versus RL for the Pendulum benchmark. We can see that the RL policy performs slightly sub-optimally compared to our policy.
486
+
487
+ Figure 11 shows the trajectories from the learned state machine policy and RL policy on Swimmer for a train environment and a test environment. While both policies generalize, the swimmer with the state machine policy is slightly faster (it takes about 35s to cover a distance of 10 units while the RL policy takes about 45s).
488
+
489
+ Figures 12, 13, & 14 show the action versus time plots for the various benchmarks using the learned state-machine policies and neural network policies. We can see that state-machine policies produce smooth actions, whereas the RL policies do not.
490
+
491
+ ![](images/4040b24d91e052e365623b24b6b4b3f1a978fa3d387b843fca87056b6bca3b09.jpg)
492
+ (a)
493
+
494
+ ![](images/5751e21a9db534377b9e610494907e6abef1996b7ad5649dad8de168999be764.jpg)
495
+ (b)
496
+
497
+ ![](images/2cfc418a160a27c6ff70150a5bc8a04ee62f4e56df41b56678bff74bfa341b89.jpg)
498
+ (c)
499
+
500
+ ![](images/466280d9e91ccf307378783ed5f8dc909f557a5fc92e27763ba67a2d1cd84c0f.jpg)
501
+ (d)
502
+
503
+ ![](images/130ebdfca6cee9423a3d56594868eb607e543e203b480a79553abe29de974063.jpg)
504
+ Figure 11: Trajectories taken by our state machine policy on Swimmer for (a) a train environment with segments of length 1, and (b) a test environment with segments of length 0.75. The colors indicate different modes. The axes are the $x$ and $y$ coordinates of the center of mass of the swimmer. Trajectories taken by the RL policy on Swimmer for (c) a train environment, and (d) a test environment. While both policies generalize, the swimmer with the state machine policy is slightly faster (it takes about 35s to cover a distance of 10 units while the RL policy takes about 45s).
505
+
506
+ ![](images/49bfc00e10db057e744a666b8467d81b73b12d0a4e200da13e7d69ab3276899d.jpg)
507
+ Figure 12: Action vs time graphs for the car benchmark for both our policy (red) and the neural network policy (blue). Left shows the velocity of the agent and Right shows the steering angle.
508
+
509
+ # C.2 ANALYSIS OF RUNNING TIME
510
+
511
+ Figure 15 shows the synthesis times for various benchmarks. It also shows the number of student-teacher iterations, and the time spent by the teacher and the student separately. The teacher optimizes the loop-free policies for different initial states in parallel. The student optimizes the switching conditions between different pairs of modes in parallel. We used a parallelized implementation with 10 threads, and report the wall clock time.
512
+
513
+ ![](images/65264e93090e55e059a37af68f88eb7efff4d561002c23327222c4caf52414b9.jpg)
514
+ Figure 13: Action vs time graphs for the pendulum benchmark (left) and the cartpole benchmark (right) for both our policy (red) and the neural network policy (blue).
515
+
516
+ ![](images/ac05d3e25f2f56a210a4e3ea6f3fe1b1963663af80da9e058fff0a20432c5d1b.jpg)
517
+
518
+ ![](images/4055762b7a7f8af0d5b3c0303a2a3290dd2e95fb215be61fb20b1398a06f5dc7.jpg)
519
+ Figure 14: Action vs time graphs for the swimmer benchmark for the three torques at the three different joints of the swimmer. Blue line is for the neural network policy and red line is for the state machine policy.
520
+
521
+ ![](images/1ac7dfc0e8f3b12241ba9859e2ebc3089ac260ff3c90b6797e42cdaf592eb7a7.jpg)
522
+
523
+ ![](images/c72baee50738e40c4d0f91dcd7a851fd23f52070a4ec27d75e3fabf523326722.jpg)
524
+
525
+ ![](images/728e8be4146d016f2ced2c65f28e0816456789546dadc3809d059d5cb613ac6e.jpg)
526
+ Figure 15: Synthesis times (in seconds, wall clock time) for learning state machine policies for the different benchmarks. The plot breaks down the total synthesis time into the time taken by the teacher, the student, and other miscellaneous parts of the algorithm. Misc. mostly includes the time spent checking convergence at every iteration. The plot also shows the number of teacher-student iterations taken for each benchmark.
527
+
528
+ ![](images/3506ac83773c64dc994b1b61713c4709f85625b3cb8fd457ba6fd8bd1aee44b7.jpg)
529
+
530
+ ![](images/8bbdc9bba00da9e93ceb94b62ea37b8be41770962ff37aa7acdb4676237fea27.jpg)
531
+ Figure 16: Synthesized state-machine policy for the Quad benchmark.
532
+
533
+ ![](images/26c8a29feef7415c5395863c86869ffce3270b9c706cd9b76f8e7c30343b0cdc.jpg)
534
+ Figure 17: Synthesized state-machine policy for the QuadPO benchmark.
535
+
536
+ ![](images/00a6128ee0089e219e8c5d8480c6e9b8bd98635b5ff69bc6f9d8e81bc7bc6a96.jpg)
537
+ Figure 18: Synthesized state-machine policy for Pendulum.
538
+
539
+ ![](images/ed5fb0fef6e4200d3adfd0582168ec403b24a0764df2983bd59f2a2f3626fa50.jpg)
540
+ Figure 19: Synthesized state-machine policy for Cartpole.
541
+
542
+ ![](images/1fc705c315fc48ce9f503bcc2bc29e2249a3f380a04c4d8a1eb4f37f2884b0c0.jpg)
543
+ Figure 20: Synthesized state-machine policy for Acrobot.
544
+
545
+ ![](images/5e3e2cd8586f7177a9f74dc5eb16aa0d6372bf0962b30da89fe8d97ad080baf4.jpg)
546
+ Figure 21: Synthesized state-machine policy for Mountain car.
547
+
548
+ ![](images/ad5e37fc2ebd9e9680c38d516f52737fd86681918519fac6a6d51a3e89526da0.jpg)
549
+ Figure 22: Synthesized state-machine policy for Swimmer.
synthesizingprogrammaticpoliciesthatinductivelygeneralize/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9922b3103def39802011f1f6d75a44922d45266bdd2209dfb4187895b734f3e
3
+ size 755958
synthesizingprogrammaticpoliciesthatinductivelygeneralize/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58144fe9ba6fa4bdc5092244df051f37ae3a9d15ea753858a09a46ae28358711
3
+ size 851960
tabfactalargescaledatasetfortablebasedfactverification/6902c8b0-acd5-41b8-8542-8ac0af074538_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d1f6a46700709ff804fab2eef40d6f6b142e82d39db8c363ed577a1946e3a33
3
+ size 137611
tabfactalargescaledatasetfortablebasedfactverification/6902c8b0-acd5-41b8-8542-8ac0af074538_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18693f6dd0444cde0cf5cfd8669c3ddcbea8754a63c1f1799bfb408da9506329
3
+ size 167543