Add Batch 321a4fc7-8e61-4672-b224-f6b1626762cd
This view is limited to 50 files because the commit contains too many changes.
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_content_list.json +3 -0
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_model.json +3 -0
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_origin.pdf +3 -0
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/full.md +476 -0
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/images.zip +3 -0
- agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/layout.json +3 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_content_list.json +3 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_model.json +3 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_origin.pdf +3 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/full.md +599 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/images.zip +3 -0
- gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/layout.json +3 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_content_list.json +3 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_model.json +3 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_origin.pdf +3 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/full.md +512 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/images.zip +3 -0
- notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/layout.json +3 -0
- tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_content_list.json +3 -0
- tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_model.json +3 -0
- tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_origin.pdf +3 -0
- tiltedempiricalriskminimization/full.md +0 -0
- tiltedempiricalriskminimization/images.zip +3 -0
- tiltedempiricalriskminimization/layout.json +3 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_content_list.json +3 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_model.json +3 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_origin.pdf +3 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/full.md +584 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/images.zip +3 -0
- tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/layout.json +3 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_content_list.json +3 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_model.json +3 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_origin.pdf +3 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/full.md +270 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/images.zip +3 -0
- towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/layout.json +3 -0
- towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_content_list.json +3 -0
- towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_model.json +3 -0
- towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_origin.pdf +3 -0
- towardsimpartialmultitasklearning/full.md +504 -0
- towardsimpartialmultitasklearning/images.zip +3 -0
- towardsimpartialmultitasklearning/layout.json +3 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_content_list.json +3 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_model.json +3 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_origin.pdf +3 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/full.md +0 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/images.zip +3 -0
- towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/layout.json +3 -0
- towardsrobustneuralnetworksviacloseloopcontrol/2ba8ed7d-798d-43ad-92c9-d621e78af611_content_list.json +3 -0
- towardsrobustneuralnetworksviacloseloopcontrol/2ba8ed7d-798d-43ad-92c9-d621e78af611_model.json +3 -0
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f3459ea809a629ae5571884e89d96ecebbc010fa5505e95545fdd86a4a583d66
+size 119104
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c784cfa932c0e4fb25509d5e9c15d1116d06fe0de30f378196a1e9484a6b4829
+size 152674
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/3cbd573e-42be-4482-b6aa-d8f0fa977158_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e662f37ae5ae16aab5b6a307992a0b13859c637d9c81f101dcc23f36434f5559
+size 49354596
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/full.md
ADDED
@@ -0,0 +1,476 @@
# A GOOD IMAGE GENERATOR IS WHAT YOU NEED FOR HIGH-RESOLUTION VIDEO SYNTHESIS

Yu Tian$^{1}$, Jian Ren$^{2}$, Menglei Chai$^{2}$, Kyle Olszewski$^{2}$, Xi Peng$^{3}$, Dimitris N. Metaxas$^{1}$, Sergey Tulyakov$^{2}$

$^{1}$Rutgers University, $^{2}$Snap Inc., $^{3}$University of Delaware
{yt219, dnm}@cs.rutgers.edu, {jren, mchai, kolszewski, stulyakov}@snapchat.com

# ABSTRACT

Image and video synthesis are closely related areas aiming at generating content from noise. While rapid progress has been demonstrated in improving image-based models to handle large resolutions, high-quality renderings, and wide variations in image content, achieving comparable video generation results remains problematic. We present a framework that leverages contemporary image generators to render high-resolution videos. We frame the video synthesis problem as discovering a trajectory in the latent space of a pre-trained and fixed image generator. Not only does such a framework render high-resolution videos, but it is also an order of magnitude more computationally efficient. We introduce a motion generator that discovers the desired trajectory, in which content and motion are disentangled. With such a representation, our framework allows for a broad range of applications, including content and motion manipulation. Furthermore, we introduce a new task, which we call cross-domain video synthesis, in which the image and motion generators are trained on disjoint datasets belonging to different domains. This allows for generating moving objects for which the desired video data is not available. Extensive experiments on various datasets demonstrate the advantages of our method over existing video generation techniques. Code will be released at https://github.com/snap-research/MoCoGAN-HD.
# 1 INTRODUCTION

Video synthesis seeks to generate a sequence of moving pictures from noise. While its closely related counterpart, image synthesis, has seen substantial advances in recent years, allowing for synthesizing at high resolutions (Karras et al., 2017), rendering images often indistinguishable from real ones (Karras et al., 2019), and supporting multiple classes of image content (Zhang et al., 2019), contemporary improvements in the domain of video synthesis have been comparatively modest. Due to the statistical complexity of videos and larger model sizes, video synthesis produces relatively low-resolution videos, yet requires longer training times. For example, scaling the image generator of Brock et al. (2019) to generate $256 \times 256$ videos requires a substantial computational budget<sup>1</sup>. Can we use a similar method to attain higher resolutions? We believe a different approach is needed.

There are two desired properties for generated videos: (i) high quality for each individual frame, and (ii) the frame sequence should be temporally consistent, i.e. depicting the same content with plausible motion. Previous works (Tulyakov et al., 2018; Clark et al., 2019) attempt to achieve both goals with a single framework, making such methods computationally demanding when high resolution is desired. We suggest a different perspective on this problem. We hypothesize that, given an image generator that has learned the distribution of video frames as independent images, a video can be represented as a sequence of latent codes from this generator. The problem of video synthesis can then be framed as discovering a latent trajectory that renders temporally consistent images. Hence, we demonstrate that (i) can be addressed by a pre-trained and fixed image generator, and (ii) can be achieved using the proposed framework to create appropriate image sequences.

To discover the appropriate latent trajectory, we introduce a motion generator, implemented via two recurrent neural networks, that operates on the initial content code to obtain the motion representation. We model motion as a residual between consecutive latent codes that are passed to the image generator for individual frame generation. Such a residual representation also facilitates the disentangling of motion and content. The motion generator is trained using an image discriminator with a contrastive loss, which forces the content to be temporally consistent, and a patch-based multi-scale video discriminator, which learns motion patterns. Our framework supports contemporary image generators such as StyleGAN2 (Karras et al., 2020b) and BigGAN (Brock et al., 2019).

We name our approach MoCoGAN-HD (Motion and Content decomposed GAN for High-Definition video synthesis), as it features several major advantages over traditional video synthesis pipelines. First, it transcends the limited resolutions of existing techniques, allowing for the generation of high-quality videos at resolutions up to $1024 \times 1024$. Second, as we search for a latent trajectory in an image generator, our method is computationally more efficient, requiring an order of magnitude less training time than previous video-based works (Clark et al., 2019). Third, as the image generator is fixed, it can be trained on a separate high-quality image dataset. Due to the disentangled representation of motion and content, our approach can learn motion from a video dataset and apply it to an image dataset, even when the two datasets belong to different domains. It thus unleashes the power of an image generator to synthesize high-quality videos when a domain (e.g., dogs) contains many high-quality images but no corresponding high-quality videos (see Fig. 4). In this manner, our method can generate realistic videos of objects it has never seen moving during training (such as generating realistic pet face videos using motions extracted from images of talking people). We refer to this new video generation task as cross-domain video synthesis. Finally, we quantitatively and qualitatively evaluate our approach, attaining state-of-the-art performance on each benchmark, and establish a challenging new baseline for video synthesis methods.
# 2 RELATED WORK

Video Synthesis. Approaches to image generation and translation using Generative Adversarial Networks (GANs) (Goodfellow et al., 2014) have demonstrated the ability to synthesize high-quality images (Radford et al., 2016; Zhang et al., 2019; Brock et al., 2019; Donahue & Simonyan, 2019; Jin et al., 2021). Built upon image translation (Isola et al., 2017; Wang et al., 2018b), works on video-to-video translation (Bansal et al., 2018; Wang et al., 2018a) are capable of converting an input video into a high-resolution output in another domain. However, the task of high-fidelity video generation in the unconditional setting is still a difficult and unresolved problem. Without strong conditional inputs such as segmentation masks (Wang et al., 2019) or human poses (Chan et al., 2019; Ren et al., 2020), which are employed by video-to-video translation works, generating videos that follow the distribution of training video samples is challenging. Earlier works on GAN-based video modeling, including MDPGAN (Yushchenko et al., 2019), VGAN (Vondrick et al., 2016), TGAN (Saito et al., 2017), MoCoGAN (Tulyakov et al., 2018), ProgressiveVGAN (Acharya et al., 2018), and TGANv2 (Saito et al., 2020), show promising results on low-resolution datasets. Recent efforts demonstrate the capacity to generate more realistic videos, but with significantly more computation (Clark et al., 2019; Weissenborn et al., 2020). In this paper, we focus on generating realistic videos using manageable computational resources. LDVDGAN (Kahembwe & Ramamoorthy, 2020) uses a low-dimensional discriminator to reduce model size and can generate videos with resolution up to $512 \times 512$, while we decrease training cost by utilizing a pre-trained image generator: high-quality generation comes from the pre-trained image generator, while the motion trajectory is modeled within its latent space. Additionally, learning motion in the latent space allows us to easily adapt the video generation model to the task of video prediction (Denton et al., 2017), in which the starting frame is given (Denton & Fergus, 2018; Zhao et al., 2018; Walker et al., 2017; Villegas et al., 2017a;b; Babaeizadeh et al., 2017; Hsieh et al., 2018; Byeon et al., 2018), by inverting the initial frame through the generator (Abdal et al., 2020) instead of training an extra image encoder (Tulyakov et al., 2018; Zhang et al., 2020).

Interpretable Latent Directions. The latent space of GANs is known to consist of semantically meaningful vectors for image manipulation. Both supervised methods, either using human annotations or pre-trained image classifiers (Goetschalckx et al., 2019; Shen et al., 2020), and unsupervised methods (Jahanian et al., 2020; Plumerault et al., 2020) are able to find interpretable directions for image editing, such as directions for image rotation or background removal (Voynov & Babenko, 2020; Shen & Zhou, 2020). We further consider the motion vectors in the latent space. By disentangling the motion trajectories in an unsupervised fashion, we are able to transfer the motion information from a video dataset to an image dataset in which no temporal information is available.

Figure 1: Left: Given an initial latent code $\mathbf{z}_1$, a trajectory of noise vectors $\epsilon_t$, and a PCA basis $\mathbf{V}$, the motion generator $G_{\mathrm{M}}$ encodes $\mathbf{z}_1$ using $\mathrm{LSTM}_{\mathrm{enc}}$ to get the initial hidden state and uses $\mathrm{LSTM}_{\mathrm{dec}}$ to estimate hidden states for future frames. The image generator $G_{\mathrm{I}}$ synthesizes images from the predicted latent codes. The discriminator $D_{\mathrm{V}}$ is trained on both real and generated video sequences. Right: For each generated video, the first and subsequent frames are sent to an image discriminator $D_{\mathrm{I}}$. An encoder-like network $F$ computes the features of synthesized images used in the contrastive loss $\mathcal{L}_{\mathrm{contr}}$, with positive pairs (same image content, but different augmentation, shown in blue) and negative pairs (different image content and augmentation, shown in red).

Contrastive Representation Learning is widely studied in unsupervised learning tasks (He et al., 2020; Chen et al., 2020a;b; Henaff et al., 2020; Löwe et al., 2019; Oord et al., 2018; Misra & Maaten, 2020). Related inputs, such as images (Wu et al., 2018) or latent representations (Hjelm et al., 2019), which can vary during training due to data augmentation, are forced to be close by minimizing differences in their representations. Recent work (Park et al., 2020) applies noise-contrastive estimation (Gutmann & Hyvärinen, 2010) to image generation tasks by learning the correspondence between image patches, achieving performance superior to that attained with cycle-consistency constraints (Zhu et al., 2017; Yi et al., 2017). In contrast, we learn an image discriminator that creates videos with coherent content by leveraging a contrastive loss (Hadsell et al., 2006) along with an adversarial loss (Goodfellow et al., 2014).
# 3 METHOD

In this section, we introduce our method for high-resolution video generation. Our framework is built on top of a pre-trained image generator (Karras et al., 2020a;b; Zhao et al., 2020a;b), which helps to generate high-quality image frames and boosts training efficiency under manageable computational resources. In addition, with the image generator fixed during training, we can disentangle video motion from image content, and enable video synthesis even when the image content and the video motion come from different domains.

More specifically, our inference framework includes a motion generator $G_{\mathrm{M}}$ and an image generator $G_{\mathrm{I}}$. $G_{\mathrm{M}}$ is implemented with two LSTM networks (Hochreiter & Schmidhuber, 1997) and predicts the latent motion trajectory $\mathbf{Z} = \{\mathbf{z}_1, \mathbf{z}_2, \dots, \mathbf{z}_n\}$, where $n$ is the number of frames in the synthesized video. The image generator $G_{\mathrm{I}}$ then synthesizes each individual frame from the motion trajectory, giving the generated video sequence $\tilde{\mathbf{v}} = \{\tilde{\mathbf{x}}_1, \tilde{\mathbf{x}}_2, \dots, \tilde{\mathbf{x}}_n\}$ with $\tilde{\mathbf{x}}_t = G_{\mathrm{I}}(\mathbf{z}_t)$ for $t = 1, 2, \dots, n$. We also define the real video clip as $\mathbf{v} = \{\mathbf{x}_1, \mathbf{x}_2, \dots, \mathbf{x}_n\}$ and the training video distribution as $p_v$.

To train the motion generator $G_{\mathrm{M}}$ to discover the desired motion trajectory, we apply a video discriminator to constrain the generated motion patterns to be similar to those of the training videos, and an image discriminator to force the frame content to be temporally consistent. Our framework is illustrated in Fig. 1. We describe each component in more detail in the following sections.
# 3.1 MOTION GENERATOR

The motion generator $G_{\mathrm{M}}$ predicts consecutive latent codes from an input code $\mathbf{z}_1 \in \mathcal{Z}$, where the latent space $\mathcal{Z}$ is shared with the image generator. For BigGAN (Brock et al., 2019), we sample $\mathbf{z}_1$ from the normal distribution $p_z$. For StyleGAN2 (Karras et al., 2020b), $p_z$ is the distribution after the multi-layer perceptron (MLP), as latent codes within this distribution can be semantically disentangled better than those from the normal distribution (Shen et al., 2020; Zhu et al., 2020).

Formally, $G_{\mathrm{M}}$ includes an LSTM encoder $\mathrm{LSTM}_{\mathrm{enc}}$, which encodes $\mathbf{z}_1$ to obtain the initial hidden state, and an LSTM decoder $\mathrm{LSTM}_{\mathrm{dec}}$, which estimates $n - 1$ subsequent hidden states recursively:

$$
\begin{aligned}
\mathbf{h}_1, \mathbf{c}_1 &= \mathrm{LSTM}_{\mathrm{enc}}(\mathbf{z}_1), \\
\mathbf{h}_t, \mathbf{c}_t &= \mathrm{LSTM}_{\mathrm{dec}}\left(\epsilon_t, \left(\mathbf{h}_{t-1}, \mathbf{c}_{t-1}\right)\right), \quad t = 2, 3, \dots, n,
\end{aligned} \tag{1}
$$

where $\mathbf{h}$ and $\mathbf{c}$ denote the hidden state and cell state respectively, and $\epsilon_t$ is a noise vector sampled from the normal distribution to model the motion diversity at time step $t$.

Motion Disentanglement. Prior work (Tulyakov et al., 2018) uses $\mathbf{h}_t$ as the motion code for the frame to be generated, while the content code is fixed for all frames. However, such a design requires a recurrent network to estimate the motion while preserving consistent content from the latent vector, which is difficult to learn in practice. Instead, we propose to use a sequence of motion residuals to estimate the motion trajectory. Specifically, we model the motion residual as a linear combination of a set of interpretable directions in the latent space (Shen & Zhou, 2020; Härkönen et al., 2020). We first conduct principal component analysis (PCA) on $m$ randomly sampled latent vectors from $\mathcal{Z}$ to get the basis $\mathbf{V}$. Then, we estimate the motion direction from the previous frame $\mathbf{z}_{t-1}$ to the current frame $\mathbf{z}_t$ using $\mathbf{h}_t$ and $\mathbf{V}$ as follows:

$$
\mathbf{z}_t = \mathbf{z}_{t-1} + \lambda \cdot \mathbf{h}_t \cdot \mathbf{V}, \quad t = 2, 3, \dots, n, \tag{2}
$$

where the hidden state $\mathbf{h}_t \in [-1, 1]$, and $\lambda$ controls the step size of the residual. With Eqn. 1 and Eqn. 2, we have $G_{\mathrm{M}}(\mathbf{z}_1) = \{\mathbf{z}_1, \mathbf{z}_2, \dots, \mathbf{z}_n\}$, and the generated video $\tilde{\mathbf{v}}$ is given by $\tilde{\mathbf{v}} = G_{\mathrm{I}}(G_{\mathrm{M}}(\mathbf{z}_1))$.
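To make the trajectory construction concrete, below is a minimal PyTorch-style sketch of a residual motion generator in the sense of Eqn. 1 and Eqn. 2. This is an illustrative reconstruction rather than the released implementation: the names (`MotionGenerator`, `hidden_dim`, `noise_dim`, `lambda_step`) and all dimensions are our assumptions.

```python
import torch
import torch.nn as nn

class MotionGenerator(nn.Module):
    # Sketch of G_M: an LSTM encoder/decoder emitting a latent trajectory
    # {z_1, ..., z_n} as motion residuals over a PCA basis V (Eqns. 1-2).
    def __init__(self, latent_dim=512, hidden_dim=384, noise_dim=128, lambda_step=0.1):
        super().__init__()
        self.enc = nn.LSTMCell(latent_dim, hidden_dim)  # LSTM_enc
        self.dec = nn.LSTMCell(noise_dim, hidden_dim)   # LSTM_dec
        self.noise_dim = noise_dim
        self.lambda_step = lambda_step                  # step size lambda in Eqn. 2

    def forward(self, z1, basis, n_frames=16):
        # z1: (B, latent_dim); basis: (hidden_dim, latent_dim) PCA directions V.
        h, c = self.enc(z1)                             # initial state (Eqn. 1)
        codes, hiddens, noises = [z1], [], []
        for _ in range(n_frames - 1):
            eps = torch.randn(z1.size(0), self.noise_dim, device=z1.device)
            h, c = self.dec(eps, (h, c))                # recursive decoding (Eqn. 1)
            # An LSTM hidden state is already bounded in (-1, 1), matching Eqn. 2.
            codes.append(codes[-1] + self.lambda_step * h @ basis)  # Eqn. 2
            hiddens.append(h)
            noises.append(eps)
        return torch.stack(codes, dim=1), hiddens, noises  # (B, n, latent_dim)
```

The basis can be computed once before training, e.g. `basis = torch.pca_lowrank(samples, q=hidden_dim)[2].T` over the $m$ sampled latent vectors, so that each hidden state mixes the principal directions of the latent space.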
Motion Diversity. In Eqn. 1, we introduce a noise vector $\epsilon_t$ to control the diversity of motion. However, we observe that the LSTM decoder tends to ignore $\epsilon_t$, resulting in motion mode collapse: $G_{\mathrm{M}}$ fails to capture the diverse motion patterns of the training videos, and videos generated from one initial latent code exhibit similar motion regardless of the sequence of noise vectors. To alleviate this issue, we introduce a mutual information loss $\mathcal{L}_{\mathrm{m}}$ to maximize the mutual information between the hidden vector $\mathbf{h}_t$ and the noise vector $\epsilon_t$. With $\mathrm{sim}(\mathbf{u}, \mathbf{v}) = \mathbf{u}^T \mathbf{v} / \|\mathbf{u}\| \|\mathbf{v}\|$ denoting the cosine similarity between vectors $\mathbf{u}$ and $\mathbf{v}$, we define $\mathcal{L}_{\mathrm{m}}$ as follows:

$$
\mathcal{L}_{\mathrm{m}} = \frac{1}{n - 1} \sum_{t=2}^{n} \mathrm{sim}\left(H(\mathbf{h}_t), \epsilon_t\right), \tag{3}
$$

where $H$ is a 2-layer MLP that serves as a mapping function.
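As a sketch, Eqn. 3 can be evaluated directly on the hidden states and noise vectors produced by the motion generator above; the layer widths of the mapping MLP `H` are assumptions, as the text only specifies that it has 2 layers.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def make_mapping_mlp(hidden_dim=384, noise_dim=128):
    # H: a 2-layer MLP mapping a hidden state h_t back to the noise space.
    return nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(),
                         nn.Linear(hidden_dim, noise_dim))

def mutual_info_loss(hiddens, noises, H):
    # Eqn. 3: mean cosine similarity between H(h_t) and eps_t over t = 2..n.
    sims = [F.cosine_similarity(H(h), eps, dim=1).mean()
            for h, eps in zip(hiddens, noises)]
    return torch.stack(sims).mean()  # maximized jointly w.r.t. G_M and H
```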
Learning. To learn appropriate parameters for the motion generator $G_{\mathrm{M}}$, we apply a multi-scale video discriminator $D_{\mathrm{V}}$ to tell whether a video sequence is real or synthesized. The discriminator is based on the architecture of PatchGAN (Isola et al., 2017); however, we use 3D convolutional layers in $D_{\mathrm{V}}$, as they model temporal dynamics better than 2D convolutional layers. We divide the input video sequence into small 3D patches and classify each patch as real or fake, averaging the local responses to produce the final output. Additionally, each frame in the input sequence is conditioned on the first frame, which lies in the distribution of the pre-trained image generator, for more stable training. We thus optimize the following adversarial loss to learn $G_{\mathrm{M}}$ and $D_{\mathrm{V}}$:

$$
\mathcal{L}_{D_{\mathrm{V}}} = \mathbb{E}_{\mathbf{v} \sim p_v} \left[ \log D_{\mathrm{V}}(\mathbf{v}) \right] + \mathbb{E}_{\mathbf{z}_1 \sim p_z} \left[ \log \left( 1 - D_{\mathrm{V}}\left( G_{\mathrm{I}}\left( G_{\mathrm{M}}(\mathbf{z}_1) \right) \right) \right) \right]. \tag{4}
$$
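The sketch below shows one plausible scale of such a patch-based 3D convolutional discriminator, with first-frame conditioning implemented as channel concatenation. The depth, channel widths, and pooling of patch logits are assumptions; a multi-scale version would apply the same network to downsampled copies of the input video.

```python
import torch
import torch.nn as nn

class PatchVideoDiscriminator(nn.Module):
    # Sketch of one scale of D_V: stacked 3D convolutions whose output is a
    # grid of logits, each scoring a local spatio-temporal patch as real/fake.
    def __init__(self, in_ch=6, base_ch=64):  # 6 channels: frame + first frame
        super().__init__()
        layers, ch = [], in_ch
        for out_ch in (base_ch, base_ch * 2, base_ch * 4):
            layers += [nn.Conv3d(ch, out_ch, kernel_size=4, stride=2, padding=1),
                       nn.LeakyReLU(0.2, inplace=True)]
            ch = out_ch
        layers.append(nn.Conv3d(ch, 1, kernel_size=3, stride=1, padding=1))
        self.net = nn.Sequential(*layers)

    def forward(self, video):
        # video: (B, 6, T, H, W); average the grid of patch logits per sample.
        return self.net(video).mean(dim=(1, 2, 3, 4))

def condition_on_first_frame(video):
    # (B, 3, T, H, W) -> (B, 6, T, H, W): attach the first frame to every frame.
    first = video[:, :, :1].expand_as(video)
    return torch.cat([video, first], dim=1)
```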
# 3.2 CONTRASTIVE IMAGE DISCRIMINATOR

As our image generator is pre-trained, we may use an image generator trained on a given domain, e.g. images of animal faces (Choi et al., 2020), and learn the motion generator parameters using videos from a different domain, such as videos of human facial expressions (Nagrani et al., 2017). With Eqn. 4 alone, however, we lack the ability to explicitly constrain the generated images $\tilde{\mathbf{x}}_{t|t > 1}$ to possess similar quality and content as the first image $\tilde{\mathbf{x}}_1$, which is sampled from the image space of the image generator and thus has high fidelity. Hence, we introduce a contrastive image discriminator $D_{\mathrm{I}}$, illustrated in Fig. 1, to match both image quality and content between $\tilde{\mathbf{x}}_1$ and $\tilde{\mathbf{x}}_{t|t > 1}$.

Quality Matching. To increase the perceptual quality, we train $D_{\mathrm{I}}$ and $G_{\mathrm{M}}$ adversarially by forwarding frames into the discriminator $D_{\mathrm{I}}$, using $\tilde{\mathbf{x}}_1$ as the real sample and $\tilde{\mathbf{x}}_{t|t > 1}$ as the fake samples:

$$
\mathcal{L}_{D_{\mathrm{I}}} = \mathbb{E}_{\mathbf{z}_1 \sim p_z} \left[ \log D_{\mathrm{I}}\left( G_{\mathrm{I}}(\mathbf{z}_1) \right) \right] + \mathbb{E}_{\mathbf{z}_1 \sim p_z, \, \mathbf{z}_t \sim G_{\mathrm{M}}(\mathbf{z}_1) | t > 1} \left[ \log \left( 1 - D_{\mathrm{I}}\left( G_{\mathrm{I}}(\mathbf{z}_t) \right) \right) \right]. \tag{5}
$$
Content Matching. To learn content similarity between frames within a video, we use the image discriminator as a feature extractor and train it with a form of contrastive loss known as InfoNCE (Oord et al., 2018). The goal is that pairs of images with the same content should be close together in the embedding space, while images containing different content should be far apart.

Given a minibatch of $N$ generated videos $\{\tilde{\mathbf{v}}^{(1)}, \tilde{\mathbf{v}}^{(2)}, \dots, \tilde{\mathbf{v}}^{(N)}\}$, we randomly sample one frame $t$ from each video, $\{\tilde{\mathbf{x}}_t^{(1)}, \tilde{\mathbf{x}}_t^{(2)}, \dots, \tilde{\mathbf{x}}_t^{(N)}\}$, and make two randomly augmented versions $(\tilde{\mathbf{x}}_t^{(ia)}, \tilde{\mathbf{x}}_t^{(ib)})$ of each frame $\tilde{\mathbf{x}}_t^{(i)}$, resulting in $2N$ samples. $(\tilde{\mathbf{x}}_t^{(ia)}, \tilde{\mathbf{x}}_t^{(ib)})$ are positive pairs, as they share the same content. $(\tilde{\mathbf{x}}_t^{(i\cdot)}, \tilde{\mathbf{x}}_t^{(j\cdot)})$ are negative pairs for all $i \neq j$.

Let $F$ be an encoder network that shares the weights and architecture of $D_{\mathrm{I}}$, except that the last layer of $D_{\mathrm{I}}$ is replaced by a 2-layer MLP projection head that produces the representation of the input images. We define the contrastive loss function $\mathcal{L}_{\mathrm{contr}}$ as the cross-entropy computed across the $2N$ augmentations:

$$
\mathcal{L}_{\mathrm{contr}} = - \sum_{i=1}^{N} \sum_{\alpha \in \{a, b\}} \log \frac{\exp\left( \mathrm{sim}\left( F(\tilde{\mathbf{x}}_t^{(ia)}), F(\tilde{\mathbf{x}}_t^{(ib)}) \right) / \tau \right)}{\sum_{j=1}^{N} \sum_{\beta \in \{a, b\}} \mathbb{1}_{[j \neq i]} \exp\left( \mathrm{sim}\left( F(\tilde{\mathbf{x}}_t^{(i\alpha)}), F(\tilde{\mathbf{x}}_t^{(j\beta)}) \right) / \tau \right)}, \tag{6}
$$

where $\mathrm{sim}(\cdot, \cdot)$ is the cosine similarity function defined in Eqn. 3, $\mathbb{1}_{[j \neq i]} \in \{0, 1\}$ equals 1 iff $j \neq i$, and $\tau$ is a temperature parameter empirically set to 0.07. We use a momentum encoder mechanism similar to that of MoCo (He et al., 2020), maintaining a memory bank in which the oldest negative samples are replaced by new ones. We apply augmentations including translation, color jittering, and cutout (DeVries & Taylor, 2017) to the synthesized images. With the positive and negative pairs generated on the fly during training, the discriminator can effectively focus on the content of the input samples.
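As a sketch, the loss of Eqn. 6 can be computed for a batch of $2N$ projected features as below. We follow the standard NT-Xent arrangement, in which the positive similarity also appears in the denominator (a common simplification of the indicator in Eqn. 6), and we omit the MoCo-style memory bank of additional negatives.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(feat_a, feat_b, tau=0.07):
    # feat_a, feat_b: (N, D) projections F(x) of two augmentations of N frames.
    n = feat_a.size(0)
    z = F.normalize(torch.cat([feat_a, feat_b], dim=0), dim=1)  # (2N, D)
    logits = z @ z.t() / tau                                    # pairwise cosine / tau
    eye = torch.eye(2 * n, dtype=torch.bool, device=z.device)
    logits = logits.masked_fill(eye, float('-inf'))             # exclude self-pairs
    # The positive for row i is its other augmentation, offset by n.
    targets = torch.cat([torch.arange(n, 2 * n), torch.arange(n)]).to(z.device)
    return F.cross_entropy(logits, targets)                     # InfoNCE over 2N rows
```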
The choice of positive pairs in Eqn. 6 is specifically designed for cross-domain video synthesis, as videos with arbitrary content from the image domain are not available. In the case that images and videos come from the same domain, the positive and negative pairs are easier to obtain: we randomly select and augment two frames from a real video to create positive pairs sharing the same content, while the negative pairs contain augmented images from different real videos.

Aside from $\mathcal{L}_{\mathrm{contr}}$, we also adopt the feature matching loss (Wang et al., 2018b) $\mathcal{L}_{\mathrm{f}}$ between the generated first frame and the other frames, replacing the $L_1$ regularization with cosine similarity.

Full Objective. The overall loss function for training the motion generator, video discriminator, and image discriminator is thus defined as:

$$
\min_{G_{\mathrm{M}}} \left( \max_{D_{\mathrm{V}}} \mathcal{L}_{D_{\mathrm{V}}} + \max_{D_{\mathrm{I}}} \mathcal{L}_{D_{\mathrm{I}}} \right) + \max_{G_{\mathrm{M}}} \left( \lambda_{\mathrm{m}} \mathcal{L}_{\mathrm{m}} + \lambda_{\mathrm{f}} \mathcal{L}_{\mathrm{f}} \right) + \min_{D_{\mathrm{I}}} \left( \lambda_{\mathrm{contr}} \mathcal{L}_{\mathrm{contr}} \right), \tag{7}
$$

where $\lambda_{\mathrm{m}}$, $\lambda_{\mathrm{contr}}$, and $\lambda_{\mathrm{f}}$ are hyperparameters that balance the losses.
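For intuition, one optimization step under Eqn. 7 might look like the sketch below, alternating a discriminator update and a motion-generator update while $G_{\mathrm{I}}$ stays frozen. The softplus form of the log losses, the omission of the feature-matching and contrastive terms, the absence of first-frame conditioning for $D_{\mathrm{V}}$, and all weight values are simplifying assumptions; `mutual_info_loss` refers to the sketch in Sec. 3.1.

```python
import torch
import torch.nn.functional as F

def train_step(z1, real_video, G_I, G_M, D_V, D_I, H, basis,
               opt_gm, opt_dv, opt_di, lam_m=1.0):
    codes, hiddens, noises = G_M(z1, basis)               # latent trajectory
    frames = [G_I(z_t) for z_t in codes.unbind(dim=1)]    # image generator frozen
    fake = torch.stack(frames, dim=2)                     # (B, 3, T, H, W)

    # Discriminator step: real videos / first frames up, generated frames down.
    d_loss = (F.softplus(-D_V(real_video)).mean()
              + F.softplus(D_V(fake.detach())).mean()
              + F.softplus(-D_I(frames[0].detach())).mean()
              + torch.stack([F.softplus(D_I(f.detach())).mean()
                             for f in frames[1:]]).mean())
    opt_dv.zero_grad(); opt_di.zero_grad()
    d_loss.backward()
    opt_dv.step(); opt_di.step()

    # Motion-generator step: fool both discriminators, keep motion diverse.
    # opt_gm is assumed to also hold the parameters of the mapping MLP H.
    g_loss = (F.softplus(-D_V(fake)).mean()
              + torch.stack([F.softplus(-D_I(f)).mean() for f in frames[1:]]).mean()
              - lam_m * mutual_info_loss(hiddens, noises, H))
    opt_gm.zero_grad()
    g_loss.backward()
    opt_gm.step()
    return d_loss.item(), g_loss.item()
```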
# 4 EXPERIMENTS

In this section, we evaluate the proposed approach on several benchmark datasets for video generation. We also demonstrate cross-domain video synthesis on various image and video datasets.

# 4.1 VIDEO GENERATION

We conduct experiments on three datasets, UCF-101 (Soomro et al., 2012), FaceForensics (Rössler et al., 2018), and Sky Time-lapse (Xiong et al., 2018), for unconditional video synthesis. We use StyleGAN2 as the image generator. Training details can be found in Appx. B.

Table 1: IS and FVD on UCF-101.

<table><tr><td>Method</td><td>IS (↑)</td><td>FVD (↓)</td></tr><tr><td>VGAN</td><td>8.31 ± .09</td><td>-</td></tr><tr><td>TGAN</td><td>11.85 ± .07</td><td>-</td></tr><tr><td>MoCoGAN</td><td>12.42 ± .07</td><td>-</td></tr><tr><td>ProgressiveVGAN</td><td>14.56 ± .05</td><td>-</td></tr><tr><td>LDVD-GAN</td><td>22.91 ± .19</td><td>-</td></tr><tr><td>TGANv2</td><td>26.60 ± .47</td><td>1209 ± 28</td></tr><tr><td>DVD-GAN</td><td>27.38 ± .53</td><td>-</td></tr><tr><td>Ours</td><td>33.95 ± .25</td><td>700 ± 24</td></tr></table>
Table 2: FVD, ACD, and human preference on FaceForensics.

<table><tr><td>Method</td><td>FVD (↓)</td><td>ACD (↓)</td></tr><tr><td>GT</td><td>9.02</td><td>0.2935</td></tr><tr><td>TGANv2</td><td>58.03</td><td>0.4914</td></tr><tr><td>Ours</td><td>53.26</td><td>0.3300</td></tr><tr><td>Method</td><td colspan="2">Human Preference (%)</td></tr><tr><td>Ours / TGANv2</td><td colspan="2">73.6 / 26.4</td></tr></table>
UCF-101 is widely used in video generation. The dataset includes 13,320 videos from 101 sport categories, each with a resolution of $320 \times 240$. To process the data, we crop a $240 \times 240$ rectangle from each frame of a video and resize it to $256 \times 256$. We train the motion generator to predict 16 frames. For evaluation, we report the Inception Score (IS) (Saito et al., 2020) on 10,000 generated videos and the Fréchet Video Distance (FVD) (Unterthiner et al., 2018) on 2,048 videos. The classifier used to calculate IS is a C3D network (Tran et al., 2015) trained on the Sports-1M dataset (Karpathy et al., 2014) and fine-tuned on UCF-101, the same model used in previous works (Saito et al., 2020; Clark et al., 2019).

The quantitative results are shown in Tab. 1. Our method achieves state-of-the-art results for both IS and FVD, and outperforms existing works by a large margin. Interestingly, this result indicates that a well-trained image generator has learned to represent rich motion patterns, and can therefore be used to synthesize high-quality videos when combined with a well-trained motion generator.

FaceForensics is a dataset containing news videos featuring various reporters. We use all the images from the 704 training videos, at a resolution of $256 \times 256$, to learn an image generator, and sequences of 16 consecutive frames to train the motion generator. Note that our network can generate even longer continuous sequences, e.g. 64 frames (Fig. 12 in Appx.), though only 16 frames are used for training.

We show the FVD between generated and real video clips (16 frames in length) for different methods in Tab. 2. Additionally, we use the Average Content Distance (ACD) from MoCoGAN (Tulyakov et al., 2018) to evaluate identity consistency in these human face videos, computing ACD values over 256 videos. We also report the two metrics for ground truth (GT) videos; to get the FVD of GT videos, we randomly sample two groups of real videos and compute the score between them. Our method achieves better results than TGANv2 (Saito et al., 2020). Both methods have low FVD values and can generate complex motion patterns close to the real data. However, the much lower ACD value of our approach, which is close to GT, demonstrates that the videos it synthesizes have much better identity consistency than the videos from TGANv2. Qualitative examples in Fig. 2 illustrate different motion patterns learned from the dataset. Furthermore, we perform perceptual experiments using Amazon Mechanical Turk (AMT) by presenting a pair of videos from the two methods to users and asking them to select the more realistic one. The results in Tab. 2 indicate that our method is preferred over TGANv2 in $73.6\%$ of the comparisons.

Figure 2: Example generated videos from a model trained on FaceForensics. We can generate natural and photo-realistic videos with various motion patterns, such as eye blinks and talking. The four examples show frames 2, 7, 11, and 16.
Sky Time-Lapse is a video dataset consisting of dynamic sky scenes, such as moving clouds. The numbers of video clips for training and testing are 35,392 and 2,815, respectively. We resize images to $128 \times 128$ and train the model to generate 16 frames. We compare our method with two recent approaches, MDGAN (Xiong et al., 2018) and DTVNet (Zhang et al., 2020), which are specifically designed for this dataset. In Tab. 3, we report the FVD for all three methods. It is clear that our approach significantly outperforms the others. Example sequences are shown in Fig. 3.

Following DTVNet (Zhang et al., 2020), we also evaluate the proposed model on the task of video prediction. We use the Peak Signal-to-Noise Ratio (PSNR) and Structural Similarity (SSIM) (Wang et al., 2004) as evaluation metrics to measure frame quality at the pixel level and the structural similarity between synthesized and real video frames. Evaluation is performed on the testing set. We select the first frame $\mathbf{x}_1$ from each video clip and project it into the latent space of the image generator (Abdal et al., 2020) to get $\hat{\mathbf{z}}_1$; a minimal projection sketch follows below. We use $\hat{\mathbf{z}}_1$ as the starting latent code for the motion generator to obtain 16 latent codes, and interpolate them to get 32 latent codes for synthesizing a video sequence whose first frame is $G_{\mathrm{I}}(\hat{\mathbf{z}}_1)$. For a fair comparison, we also use $G_{\mathrm{I}}(\hat{\mathbf{z}}_1)$ as the starting frame for MDGAN and DTVNet when computing the metrics against ground truth videos. In addition, we calculate the PSNR and SSIM between $\mathbf{x}_1$ and $G_{\mathrm{I}}(\hat{\mathbf{z}}_1)$ as an upper bound for all methods, denoted Up-B. Tab. 3 shows the video prediction results, which demonstrate that our method outperforms MDGAN and DTVNet. Interestingly, by simply interpolating the motion trajectory, we can easily generate longer video sequences, e.g. from 16 to 32 frames, while retaining high quality.
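A minimal sketch of the first-frame projection step, in the spirit of GAN inversion (Abdal et al., 2020). The plain pixel MSE, optimizer choice, step count, and learning rate are assumptions; perceptual losses are commonly added in practice.

```python
import torch

def project_first_frame(x1, G_I, latent_dim=512, steps=1000, lr=0.01):
    # Optimize a latent code z_hat so that G_I(z_hat) reconstructs frame x1.
    z_hat = torch.randn(x1.size(0), latent_dim, device=x1.device,
                        requires_grad=True)
    opt = torch.optim.Adam([z_hat], lr=lr)
    for _ in range(steps):
        opt.zero_grad()
        loss = torch.mean((G_I(z_hat) - x1) ** 2)  # pixel reconstruction loss
        loss.backward()
        opt.step()
    return z_hat.detach()  # starting code fed to the motion generator
```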
Table 3: Evaluation on Sky Time-lapse for video synthesis and prediction.

<table><tr><td>Method</td><td>FVD (↓)</td><td>PSNR (↑)</td><td>SSIM (↑)</td></tr><tr><td>Up-B</td><td>-</td><td>25.367</td><td>0.781</td></tr><tr><td>MDGAN</td><td>840.95</td><td>13.840</td><td>0.581</td></tr><tr><td>DTVNet</td><td>451.14</td><td>21.953</td><td>0.531</td></tr><tr><td>Ours</td><td>77.77</td><td>22.286</td><td>0.688</td></tr></table>

Figure 3: Sample generated frames at several time steps $(t)$ for the Sky Time-lapse dataset.

Figure 4: Example sequences for cross-domain video generation. First row: (FFHQ, VoxCeleb). Second row: (LSUN-Church, TLVDB). Third row: (AFHQ-Dog, VoxCeleb). Fourth row: (AnimeFaces, VoxCeleb). Images in the first and second rows have a resolution of $256 \times 256$, while those in the third and fourth rows have a resolution of $512 \times 512$.
# 4.2 CROSS-DOMAIN VIDEO GENERATION

To demonstrate how our approach can disentangle motion from image content and transfer motion patterns from one domain to another, we perform several experiments on various datasets. More specifically, we use StyleGAN2 models pre-trained on the FFHQ (Karras et al., 2019), AFHQ-Dog (Choi et al., 2020), AnimeFaces (Branwen, 2019), and LSUN-Church (Yu et al., 2015) datasets as the image generators. We learn human facial motion from VoxCeleb (Nagrani et al., 2020) and time-lapse transitions in outdoor scenes from TLVDB (Shih et al., 2013). In these experiments, a pair such as (FFHQ, VoxCeleb) indicates that we synthesize videos with image content from FFHQ and motion patterns from VoxCeleb. We generate videos at resolutions of $256 \times 256$ and $1024 \times 1024$ for FFHQ, $512 \times 512$ for AFHQ-Dog and AnimeFaces, and $256 \times 256$ for LSUN-Church. Qualitative examples for (FFHQ, VoxCeleb), (LSUN-Church, TLVDB), (AFHQ-Dog, VoxCeleb), and (AnimeFaces, VoxCeleb) are shown in Fig. 4, depicting high-quality and temporally consistent videos (more videos, including results with BigGAN as the image generator, are shown in the Appendix).

We also demonstrate how motion and content are disentangled in Fig. 5 and Fig. 6, which portray generated videos with the same identity performing diverse motion patterns, and the same motion applied to different identities, respectively. Both figures show results from (AFHQ-Dog, VoxCeleb) (first two rows) and (AnimeFaces, VoxCeleb) (last two rows).

Figure 5: The first and second rows (likewise the third and fourth rows) share the same initial content code but use different motion codes.

Figure 6: The first and second rows (likewise the third and fourth rows) share the same motion code but use different content codes.
# 4.3 ABLATION ANALYSIS

We first report IS and FVD on UCF-101 in Tab. 4 for the following variants: w/o Eqn. 2 uses $\mathbf{z}_t = \mathbf{h}_t$ instead of estimating the residual as in Eqn. 2; w/o $D_{\mathrm{I}}$ omits the contrastive image discriminator $D_{\mathrm{I}}$ and uses only the video discriminator $D_{\mathrm{V}}$ to learn the motion generator; w/o $D_{\mathrm{V}}$ omits $D_{\mathrm{V}}$ during training; and Full-128 and Full-256 indicate that we generate videos with our full method at resolutions of $128 \times 128$ and $256 \times 256$, respectively. We resize frames from all methods to $128 \times 128$ when calculating IS and FVD. The full method outperforms all variants, demonstrating the importance of each module for learning temporally consistent and high-quality videos.

We perform further analysis of our cross-domain video generation on (FFHQ, VoxCeleb). We compare our full method (Full) with two variants: w/o $\mathcal{L}_{\mathrm{contr}}$ omits the contrastive loss (Eqn. 6) from $D_{\mathrm{I}}$, and w/o $\mathcal{L}_{\mathrm{m}}$ omits the mutual information loss (Eqn. 3) for the motion generator. The results in Tab. 5 demonstrate that $\mathcal{L}_{\mathrm{contr}}$ is beneficial for learning videos with coherent content, as employing $\mathcal{L}_{\mathrm{contr}}$ results in lower ACD values and higher human preference. $\mathcal{L}_{\mathrm{m}}$ also contributes to generating higher-quality videos by mitigating motion synchronization across samples, i.e., the motion mode collapse described in Sec. 3.1. To validate motion diversity, we show users pairs of 9 randomly generated videos from the two methods and ask them to choose the one with superior motion diversity, including rotations and facial expressions. User preference suggests that using $\mathcal{L}_{\mathrm{m}}$ increases motion diversity.

Table 4: Ablation study on UCF-101.

<table><tr><td>Method</td><td>IS (↑)</td><td>FVD (↓)</td></tr><tr><td>w/o Eqn. 2</td><td>28.20</td><td>790.87</td></tr><tr><td>w/o DI</td><td>33.22</td><td>796.67</td></tr><tr><td>w/o DV</td><td>33.84</td><td>867.43</td></tr><tr><td>Full-128</td><td>32.36</td><td>838.09</td></tr><tr><td>Full-256</td><td>33.95</td><td>700.00</td></tr></table>

Table 5: Ablation study on (FFHQ, VoxCeleb).

<table><tr><td>Method</td><td>w/o Lcontr</td><td>w/o Lm</td><td>Full</td></tr><tr><td>ACD (↓)</td><td>0.5328</td><td>0.5158</td><td>0.4353</td></tr><tr><td colspan="2">Method</td><td colspan="2">Human Preference (%)</td></tr><tr><td colspan="2">Full vs w/o Lcontr</td><td colspan="2">68.3 / 31.7</td></tr><tr><td colspan="2">Full vs w/o Lm</td><td colspan="2">64.4 / 35.6</td></tr></table>
# 4.4 LONG SEQUENCE GENERATION

Due to the limitation of computational resources, we train MoCoGAN-HD to synthesize 16 consecutive frames. However, we can generate longer video sequences during inference in the following two ways.

Motion Generator Unrolling. We can run the LSTM decoder of the motion generator for more steps to synthesize long video sequences. In Fig. 7, we show a synthesized 64-frame video from the model trained on the FaceForensics dataset. Our method is capable of synthesizing videos with more frames than the number used during training.

Figure 7: A 64-frame video generated by a model trained with 16-frame sequences on FaceForensics.

Motion Interpolation. We can also interpolate the motion trajectory directly to synthesize long videos, as sketched below. Fig. 8 shows a 32-frame interpolation example on the (AFHQ-Dog, VoxCeleb) pair.

Figure 8: A 32-frame video generated on (AFHQ-Dog, VoxCeleb) by interpolating the motion trajectory.
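A sketch of the interpolation idea: insert linear midpoints between consecutive latent codes and decode the denser trajectory with $G_{\mathrm{I}}$. Plain midpoint insertion, which yields $2n - 1$ codes from $n$ (one short of an exact doubling), is our simplifying assumption.

```python
import torch

def interpolate_trajectory(codes):
    # codes: (B, n, D) latent trajectory -> (B, 2n - 1, D) with midpoints added.
    B, n, D = codes.shape
    mids = 0.5 * (codes[:, :-1] + codes[:, 1:])   # (B, n - 1, D)
    out = codes.new_empty(B, 2 * n - 1, D)
    out[:, 0::2] = codes                          # original codes
    out[:, 1::2] = mids                           # interpolated codes
    return out  # decode each code with G_I to render the longer video
```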
# 5 CONCLUSION

In this work, we present a novel approach to video synthesis. Building on contemporary advances in image synthesis, we show that a good image generator together with our framework are the essential ingredients for boosting video synthesis fidelity and resolution. The key is to find a meaningful trajectory in the image generator's latent space, which is achieved by the proposed motion generator producing a sequence of motion residuals, trained with the contrastive image discriminator and the video discriminator. This disentangled representation further extends the applications of video synthesis to content and motion manipulation and to cross-domain video synthesis. The framework achieves superior results on a variety of benchmarks and reaches resolutions unattainable by prior state-of-the-art techniques.

# REFERENCES
| 208 |
+
|
| 209 |
+
Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan++: How to edit the embedded images? In CVPR, 2020.
|
| 210 |
+
Dinesh Acharya, Zhiwu Huang, Danda Pani Paudel, and Luc Van Gool. Towards high resolution video generation with progressive growing of sliced wasserstein gans. arXiv:1810.02419, 2018.
|
| 211 |
+
Mohammad Babaeizadeh, Chelsea Finn, Dumitru Erhan, Roy H Campbell, and Sergey Levine. Stochastic variational video prediction. In ICLR, 2017.
|
| 212 |
+
Aayush Bansal, Shugao Ma, Deva Ramanan, and Yaser Sheikh. Recycle-gan: Unsupervised video retargeting. In ECCV, 2018.
|
| 213 |
+
Gwern Branwen. Making anime faces with stylegan. 2019.
|
| 214 |
+
Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In ICLR, 2019.
|
| 215 |
+
Wonmin Byeon, Qin Wang, Rupesh Kumar Srivastava, and Petros Koumoutsakos. Contextyp: Fully context-aware video prediction. In ECCV, 2018.
|
| 216 |
+
Caroline Chan, Shiry Ginosar, Tinghui Zhou, and Alexei A Efros. Everybody dance now. In ICCV, 2019.
|
| 217 |
+
Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020a.
|
| 218 |
+
Ting Chen, Simon Kornblith, Kevin Swersky, Mohammad Norouzi, and Geoffrey Hinton. Big self-supervised models are strong semi-supervised learners. arXiv:2006.10029, 2020b.
|
| 219 |
+
Yunjey Choi, Youngjung Uh, Jaejun Yoo, and Jung-Woo Ha. Stargan v2: Diverse image synthesis for multiple domains. In CVPR, 2020.
|
| 220 |
+
Aidan Clark, Jeff Donahue, and Karen Simonyan. Adversarial video generation on complex datasets. arXiv, 2019.
|
| 221 |
+
Emily Denton and Rob Fergus. Stochastic video generation with a learned prior. In ICML, 2018.
|
| 222 |
+
Emily L Denton et al. Unsupervised learning of disentangled representations from video. In Advances in neural information processing systems, pp. 4414-4423, 2017.
|
| 223 |
+
Terrance DeVries and Graham W Taylor. Improved regularization of convolutional neural networks with cutout. arXiv:1708.04552, 2017.
|
| 224 |
+
Jeff Donahue and Karen Simonyan. Large scale adversarial representation learning. In NeurIPS, 2019.
|
| 225 |
+
Frederik Ebert, Chelsea Finn, Alex X Lee, and Sergey Levine. Self-supervised visual planning with temporal skip connections. arXiv preprint arXiv:1710.05268, 2017.
|
| 226 |
+
Lore Goetschalckx, Alex Andonian, Aude Oliva, and Phillip Isola. Ganalyze: Toward visual definitions of cognitive image properties. In ICCV, 2019.
|
| 227 |
+
Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In NeurIPS, 2014.
|
| 228 |
+
Michael Gutmann and Aapo Hyvarinen. Noise-contrastive estimation: A new estimation principle for unnormalized statistical models. In AISTATS, 2010.
|
| 229 |
+
Raia Hadsell, Sumit Chopra, and Yann LeCun. Dimensionality reduction by learning an invariant mapping. In CVPR, 2006.
|
| 230 |
+
Erik Härkönen, Aaron Hertzmann, Jaakko Lehtinen, and Sylvain Paris. Ganspace: Discovering interpretable gan controls. arXiv:2004.02546, 2020.
|
| 231 |
+
|
| 232 |
+
Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020.
|
| 233 |
+
Olivier J Henaff, Aravind Srinivas, Jeffrey De Fauw, Ali Razavi, Carl Doersch, SM Eslami, and Aaron van den Oord. Data-efficient image recognition with contrastive predictive coding. In ICML, 2020.
|
| 234 |
+
Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In NeurIPs, 2017.
|
| 235 |
+
R Devon Hjelm, Alex Fedorov, Samuel Lavoie-Marchildon, Karan Grewal, Phil Bachman, Adam Trischler, and Yoshua Bengio. Learning deep representations by mutual information estimation and maximization. In ICLR, 2019.
|
| 236 |
+
Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 1997.
|
| 237 |
+
Jun-Ting Hsieh, Bingbin Liu, De-An Huang, Li F Fei-Fei, and Juan Carlos Niebles. Learning to decompose and disentangle representations for video prediction. In NeurIPS, 2018.
|
| 238 |
+
Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A Efros. Image-to-image translation with conditional adversarial networks. In CVPR, 2017.
|
| 239 |
+
Ali Jahanian, Lucy Chai, and Phillip Isola. On the "steerability" of generative adversarial networks. In ICLR, 2020.
|
| 240 |
+
Qing Jin, Jian Ren, Oliver J. Woodford, Jiazhuo Wang, Geng Yuan Yuan, Yanzhi Wang, and Sergey Tulyakov. Teachers do more than teach: Compressing image-to-image models. arXiv preprint arXiv:2103.03467, 2021.
|
| 241 |
+
Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In ECCV, 2016.
|
| 242 |
+
Emmanuel Kahembwe and Subramanian Ramamoorthy. Lower dimensional kernels for video discriminators. Neural Networks, 132:506-520, 2020.
|
| 243 |
+
Andrej Karpathy, George Toderici, Sanketh Shetty, Thomas Leung, Rahul Sukthankar, and Li Fei-Fei. Large-scale video classification with convolutional neural networks. In CVPR, 2014.
|
| 244 |
+
Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of gans for improved quality, stability, and variation. arXiv:1710.10196, 2017.
|
| 245 |
+
Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In CVPR, 2019.
|
| 246 |
+
Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. arXiv:2006.06676, 2020a.
|
| 247 |
+
Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In CVPR, 2020b.
|
| 248 |
+
Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv:1412.6980, 2014.
|
| 249 |
+
Sindy Löwe, Peter O'Connor, and Bastiaan Veeling. Putting an end to end-to-end: Gradient-isolated learning of representations. In NeurIPS, 2019.
|
| 250 |
+
Ishan Misra and Laurens van der Maaten. Self-supervised learning of pretext-invariant representations. In CVPR, 2020.
|
| 251 |
+
A. Nagrani, J. S. Chung, and A. Zisserman. Voxceleb: a large-scale speaker identification dataset. In INTERSPEECH, 2017.
|
| 252 |
+
Arsha Nagrani, Joon Son Chung, Weidi Xie, and Andrew Zisserman. Voxceleb: Large-scale speaker verification in the wild. Computer Speech & Language, 2020.
|
| 253 |
+
|
| 254 |
+
Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv:1807.03748, 2018.
|
| 255 |
+
Taesung Park, Alexei A. Efros, Richard Zhang, and Jun-Yan Zhu. Contrastive learning for unpaired image-to-image translation. In ECCV, 2020.
|
| 256 |
+
Antoine Plumerault, Hervé Le Borgne, and Céline Hudelot. Controlling generative models with continuous factors of variations. In ICLR, 2020.
|
| 257 |
+
Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. In ICLR, 2016.
|
| 258 |
+
Jian Ren, Mengei Chai, Sergey Tulyakov, Chen Fang, Xiaohui Shen, and Jianchao Yang. Human motion transfer from poses in the wild. arXiv:2004.03142, 2020.
|
| 259 |
+
Andreas Rössler, Davide Cozzolino, Luisa Verdoliva, Christian Riess, Justus Thies, and Matthias Nießner. Faceforensics: A large-scale video dataset for forgery detection in human faces. arXiv:1803.09179, 2018.
|
| 260 |
+
Masaki Saito, Eiichi Matsumoto, and Shunta Saito. Temporal generative adversarial nets with singular value clipping. In ICCV, 2017.
|
| 261 |
+
Masaki Saito, Shunta Saito, Masanori Koyama, and Sosuke Kobayashi. Train sparsely, generate densely: Memory-efficient unsupervised training of high-resolution temporal gan. IJCV, 2020.
|
| 262 |
+
Yujun Shen and Bolei Zhou. Closed-form factorization of latent semantics in gans. arXiv:2007.06600, 2020.
|
| 263 |
+
Yujun Shen, Jinjin Gu, Xiaoou Tang, and Bolei Zhou. Interpreting the latent space of gans for semantic face editing. In CVPR, 2020.
|
| 264 |
+
Yichang Shih, Sylvain Paris, Frédo Durand, and William T Freeman. Data-driven hallucination of different times of day from a single outdoor photo. TOG, 2013.
|
| 265 |
+
Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv:1409.1556, 2014.
|
| 266 |
+
Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv:1212.0402, 2012.
|
| 267 |
+
Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In ICCV, 2015.
|
| 268 |
+
Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. Mocogan: Decomposing motion and content for video generation. In CVPR, 2018.
|
| 269 |
+
Thomas Unterthiner, Sjoerd van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. Towards accurate generative models of video: A new metric & challenges. arXiv:1812.01717, 2018.
|
| 270 |
+
Ruben Villegas, Jimei Yang, Seunghoon Hong, Xunyu Lin, and Honglak Lee. Decomposing motion and content for natural video sequence prediction. In ICLR, 2017a.
|
| 271 |
+
Ruben Villegas, Jimei Yang, Yuliang Zou, Sungryull Sohn, Xunyu Lin, and Honglak Lee. Learning to generate long-term future via hierarchical prediction. In ICML, 2017b.
|
| 272 |
+
Carl Vondrick, Hamed Pirsiavash, and Antonio Torralba. Generating videos with scene dynamics. In NeurIPS, 2016.
|
| 273 |
+
Andrey Voynov and Artem Babenko. Unsupervised discovery of interpretable directions in the gan latent space. In ICML, 2020.
|
| 274 |
+
Jacob Walker, Kenneth Marino, Abhinav Gupta, and Martial Hebert. The pose knows: Video forecasting by generating pose futures. In ICCV, 2017.
|
| 275 |
+
|
| 276 |
+
Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Guilin Liu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. Video-to-video synthesis. In NeurIPS, 2018a.
|
| 277 |
+
Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In CVPR, 2018b.
|
| 278 |
+
Ting-Chun Wang, Ming-Yu Liu, Andrew Tao, Guilin Liu, Jan Kautz, and Bryan Catanzaro. Few-shot video-to-video synthesis. In NeurIPS, 2019.
|
| 279 |
+
Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. TIP, 2004.
|
| 280 |
+
Dirk Weissenborn, Oscar Täckström, and Jakob Uszkoreit. Scaling autoregressive video models. In ICLR, 2020.
|
| 281 |
+
Zhirong Wu, Yuanjun Xiong, Stella X. Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In CVPR, 2018.
|
| 282 |
+
Wei Xiong, Wenhan Luo, Lin Ma, Wei Liu, and Jiebo Luo. Learning to generate time-lapse videos using multi-stage dynamic generative adversarial networks. In CVPR, 2018.
|
| 283 |
+
Zili Yi, Hao Zhang, Ping Tan, and Minglun Gong. Dualgan: Unsupervised dual learning for image-to-image translation. In ICCV, 2017.
|
| 284 |
+
Fisher Yu, Ari Seff, Yinda Zhang, Shuran Song, Thomas Funkhouser, and Jianxiong Xiao. Lsun: Construction of a large-scale image dataset using deep learning with humans in the loop. arXiv:1506.03365, 2015.
|
| 285 |
+
Vladyslav Yushchenko, Nikita Araslanov, and Stefan Roth. Markov decision process for video generation. In ICCV Workshops, 2019.
|
| 286 |
+
Han Zhang, Ian Goodfellow, Dimitris Metaxas, and Augustus Odena. Self-attention generative adversarial networks. In ICML, 2019.
|
| 287 |
+
Jiangning Zhang, Chao Xu, Liang Liu, Mengmeng Wang, Xia Wu, Yong Liu, and Yunliang Jiang. Dtvnet: Dynamic time-lapse video generation via single still image. In ECCV, 2020.
|
| 288 |
+
Long Zhao, Xi Peng, Yu Tian, Mubbasir Kapadia, and Dimitris Metaxas. Learning to forecast and refine residual motion for image-to-video generation. In ECCV, 2018.
|
| 289 |
+
Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient gan training. arXiv:2006.10738, 2020a.
|
| 290 |
+
Zhengli Zhao, Zizhao Zhang, Ting Chen, Sameer Singh, and Han Zhang. Image augmentations for gan training. arXiv:2006.02595, 2020b.
|
| 291 |
+
Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. In-domain gan inversion for real image editing. In ECCV, 2020.
|
| 292 |
+
Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV, 2017.
|
| 293 |
+
|
| 294 |
+
# A ADDITIONAL DETAILS FOR THE FRAMEWORK
|
| 295 |
+
|
| 296 |
+
# A.1 ADDITIONAL DETAILS FOR THE MOTION GENERATOR
|
| 297 |
+
|
| 298 |
+
To use StyleGAN2 (Karras et al., 2020b) as the image generator, we randomly sample 1,000,000 latent codes from the input space $\mathcal{Z}$ and send them through the 8-layer MLP to get latent codes in the space $\mathcal{W}$ . Each latent code is a 512-dimensional vector. We perform PCA on these 1,000,000 latent codes and select the top 384 principal components to form the matrix $\mathbf{V} \in \mathbb{R}^{384 \times 512}$ , which is used to model the motion residuals in Eqn. 2. The LSTM encoder and the LSTM decoder in the motion generator both have an input size of 512 and a hidden size of 384. The noise vector $\epsilon_{t}$ in Eqn. 1 is also a 512-dimensional vector, and the network $H$ in Eqn. 3 is a 2-layer MLP with 512 hidden units in each of its two fully-connected layers.
|
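As a concrete reference, the following is a minimal PyTorch sketch of how such a PCA basis could be computed; the mapping-network handle `mapping` and the batching scheme are illustrative assumptions, not the released implementation, and the SVD assumes a recent PyTorch with `torch.linalg`.

```python
import torch

@torch.no_grad()
def compute_pca_basis(mapping, n_samples=1_000_000, dim=512,
                      n_components=384, batch=10_000):
    """Sketch: build the PCA basis V used for motion residuals.
    `mapping` stands in for StyleGAN2's 8-layer MLP (Z -> W)."""
    ws = []
    for _ in range(n_samples // batch):
        z = torch.randn(batch, dim)      # sample latent codes from Z
        ws.append(mapping(z))            # map to W space
    W = torch.cat(ws, dim=0)             # (n_samples, 512)
    W = W - W.mean(dim=0, keepdim=True)  # center before PCA
    # Right singular vectors of the centered data are the principal directions.
    _, _, Vh = torch.linalg.svd(W, full_matrices=False)
    return Vh[:n_components]             # V: (384, 512)
```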
| 299 |
+
|
| 300 |
+
For BigGAN (Brock et al., 2019), we sample the latent code directly from the space of $\mathcal{Z}$ .
|
| 301 |
+
|
| 302 |
+
# A.2 ADDITIONAL DETAILS FOR THE DISCRIMINATORS
|
| 303 |
+
|
| 304 |
+
# A.2.1 VIDEO DISCRIMINATOR
|
| 305 |
+
|
| 306 |
+
The input images for the video discriminator $D_{\mathrm{V}}$ are processed at two scales. We downsample the output images from the image generator to resolutions of $128 \times 128$ and $64 \times 64$ . For in-domain video synthesis, the input sequences for $D_{\mathrm{V}}$ have the shape of $6 \times (n - 1) \times 128 \times 128$ and $6 \times (n - 1) \times 64 \times 64$ , where $n$ is the sequence length used for training. For each of the $(n - 1)$ subsequent frames, we concatenate the RGB channels of the first frame with those of that subsequent frame, resulting in a 6-channel input. For cross-domain video synthesis, the input sequences for $D_{\mathrm{V}}$ have the shape of $3 \times n \times 128 \times 128$ and $3 \times n \times 64 \times 64$ , as concatenating the first frame would make the discriminator aware of the domain gap. Details for $D_{\mathrm{V}}$ are shown in Tab. 6.
|
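The two-scale input construction can be summarized with the short sketch below; the tensor layout and the `in_domain` flag are our own notational assumptions.

```python
import torch
import torch.nn.functional as F

def make_dv_input(frames, in_domain=True):
    """Sketch of the two-scale input construction for D_V.
    frames: (B, n, 3, H, W) generator output (layout assumed)."""
    def resize(x, s):
        b, n, c, h, w = x.shape
        x = F.interpolate(x.flatten(0, 1), size=(s, s),
                          mode='bilinear', align_corners=False)
        return x.view(b, n, c, s, s)

    inputs = []
    for s in (128, 64):
        x = resize(frames, s)
        if in_domain:
            # Pair each later frame with the first frame -> 6-channel inputs.
            first = x[:, :1].expand(-1, x.size(1) - 1, -1, -1, -1)
            x = torch.cat([first, x[:, 1:]], dim=2)   # (B, n-1, 6, s, s)
        inputs.append(x.permute(0, 2, 1, 3, 4))       # (B, C, T, s, s) for Conv3d
    return inputs
```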
| 307 |
+
|
| 308 |
+
Table 6: The network architecture for video discriminator.
|
| 309 |
+
|
| 310 |
+
<table><tr><td>Operation</td><td>Kernel</td><td>Strides</td><td># Channels</td><td>Norm Type</td><td>Nonlinearity</td></tr><tr><td>Conv3d</td><td>4×4</td><td>2</td><td>64</td><td>-</td><td>Leaky ReLU (0.2)</td></tr><tr><td>Conv3d</td><td>4×4</td><td>2</td><td>128</td><td>InstanceNorm3d</td><td>Leaky ReLU (0.2)</td></tr><tr><td>Conv3d</td><td>4×4</td><td>2</td><td>256</td><td>InstanceNorm3d</td><td>Leaky ReLU (0.2)</td></tr><tr><td>Conv3d</td><td>4×4</td><td>1</td><td>512</td><td>InstanceNorm3d</td><td>Leaky ReLU (0.2)</td></tr><tr><td>Conv3d</td><td>4×4</td><td>1</td><td>1</td><td>-</td><td>-</td></tr></table>
|
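Read as a PyTorch module, Tab. 6 could be instantiated roughly as follows; kernel and padding details beyond what the table states are assumptions.

```python
import torch.nn as nn

def build_video_discriminator(in_channels=6):
    """Sketch of D_V following Tab. 6 (3D kernels/padding assumed)."""
    def block(cin, cout, stride, norm):
        layers = [nn.Conv3d(cin, cout, kernel_size=4, stride=stride, padding=1)]
        if norm:
            layers.append(nn.InstanceNorm3d(cout))
        layers.append(nn.LeakyReLU(0.2, inplace=True))
        return layers

    return nn.Sequential(
        *block(in_channels, 64, stride=2, norm=False),
        *block(64, 128, stride=2, norm=True),
        *block(128, 256, stride=2, norm=True),
        *block(256, 512, stride=1, norm=True),
        nn.Conv3d(512, 1, kernel_size=4, stride=1, padding=1),  # no norm/nonlinearity
    )
```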
| 311 |
+
|
| 312 |
+
# A.2.2 IMAGE DISCRIMINATOR
|
| 313 |
+
|
| 314 |
+
The image discriminator $D_{\mathrm{I}}$ has an architecture based on that of the BigGAN discriminator, except that we remove the self-attention layer. The feature extractor $F$ used for contrastive learning has the same architecture as $D_{\mathrm{I}}$ , except that it does not include the last layer of $D_{\mathrm{I}}$ but instead has two additional fully connected (FC) layers as the projection head. The number of hidden units in each of these two FC layers is 256.
|
| 315 |
+
|
| 316 |
+
Here we describe in more detail the image augmentation and memory bank techniques used for conducting contrastive learning.
|
| 317 |
+
|
| 318 |
+
Image Augmentation. We perform data augmentation on images to create positive and negative pairs. We normalize the images to $[-1, 1]$ and apply the following augmentation techniques (a minimal sketch follows the list).
|
| 319 |
+
|
| 320 |
+
- Affine. We augment each image with an affine transformation defined by three random parameters: rotation $\alpha_{r} \sim \mathcal{U}(-180, 180)$ , translation $\alpha_{t} \sim \mathcal{U}(-0.1, 0.1)$ , and scale $\alpha_{s} \sim \mathcal{U}(0.95, 1.05)$ .
|
| 321 |
+
- Brightness. We add a random value $\alpha_{b} \sim \mathcal{U}(-0.5, 0.5)$ to all channels of each image.
|
| 322 |
+
- Color. We add a random value $\alpha_{c} \sim \mathcal{U}(-0.5, 0.5)$ to one randomly-selected channel of each image.
|
| 323 |
+
|
| 324 |
+
- Cutout (DeVries & Taylor, 2017). We set the pixels in a random subregion of each image to 0. Each subregion starts at a random point and has size $(\alpha_{m}H,\alpha_{m}W)$ , where $\alpha_{m}\sim \mathcal{U}(0,0.25)$ and $(H,W)$ is the image resolution.
|
| 325 |
+
- Flipping. We horizontally flip the image with probability 0.5.
|
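A minimal sketch of these augmentations is given below; it omits the affine transform (which would typically be delegated to a library such as torchvision or kornia), and any sampling detail beyond the ranges listed above is an assumption.

```python
import torch

def augment(img):
    """Sketch of the augmentation pipeline for contrastive pairs.
    img: (C, H, W) tensor normalized to [-1, 1]."""
    c, h, w = img.shape
    # Brightness: shift all channels by the same random value in U(-0.5, 0.5).
    img = img + (torch.rand(1) - 0.5)
    # Color: shift one randomly chosen channel by a value in U(-0.5, 0.5).
    ch = torch.randint(c, (1,)).item()
    img[ch] = img[ch] + (torch.rand(1) - 0.5)
    # Cutout: zero a random subregion of relative size U(0, 0.25).
    am = 0.25 * torch.rand(1).item()
    mh, mw = int(am * h), int(am * w)
    top = torch.randint(h - mh + 1, (1,)).item()
    left = torch.randint(w - mw + 1, (1,)).item()
    img[:, top:top + mh, left:left + mw] = 0
    # Horizontal flip with probability 0.5.
    if torch.rand(1) < 0.5:
        img = torch.flip(img, dims=[-1])
    return img
```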
| 326 |
+
|
| 327 |
+
Memory Bank. It has been shown that contrastive learning benefits from large batch sizes and many negative pairs (Chen et al., 2020b). To increase the number of negative pairs, we incorporate the memory mechanism from MoCo (He et al., 2020), which maintains a memory bank of negative examples. More specifically, we keep an exponential moving average of the image discriminator, and its outputs on fake video frames are buffered as negative examples. We use a memory bank with a dictionary size of 4,096.
|
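A compact sketch of the two ingredients, the EMA copy of the discriminator and the feature queue, is given below; class and function names are illustrative rather than the released API.

```python
import torch
import torch.nn.functional as F

class MemoryBank:
    """Sketch of a MoCo-style queue of negative features (size 4,096)."""
    def __init__(self, dim=256, size=4096):
        self.feats = F.normalize(torch.randn(size, dim), dim=1)
        self.ptr = 0

    @torch.no_grad()
    def enqueue(self, f):
        """f: (B, dim) features from the EMA encoder on fake frames."""
        b = f.size(0)
        idx = torch.arange(self.ptr, self.ptr + b) % self.feats.size(0)
        self.feats[idx] = F.normalize(f, dim=1)
        self.ptr = (self.ptr + b) % self.feats.size(0)

@torch.no_grad()
def ema_update(model, ema_model, m=0.999):
    """Keep an exponential moving average of the image discriminator."""
    for p, p_ema in zip(model.parameters(), ema_model.parameters()):
        p_ema.mul_(m).add_(p, alpha=1 - m)
```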
| 328 |
+
|
| 329 |
+
# B MORE DETAILS FOR EXPERIMENTS
|
| 330 |
+
|
| 331 |
+
Image Generators. We train the unconditional StyleGAN2 models from scratch on the UCF-101, FaceForensics, Sky Time-lapse, and AFHQ-Dog datasets. We train the image generators with the official TensorFlow code<sup>2</sup> and select the checkpoints that obtain the best Fréchet inception distance (FID) (Heusel et al., 2017) scores to use as image generators. The FID score of each image generator is shown in Table 7. For FFHQ, AnimeFaces, and LSUN-Church, we simply use the released pre-trained models.
|
| 332 |
+
|
| 333 |
+
We also train an unconditional BigGAN model on the FFHQ dataset using the public PyTorch code<sup>3</sup>. We train a model with resolution $128 \times 128$ and select the last checkpoint as the image generator.
|
| 334 |
+
|
| 335 |
+
Table 7: FID of our trained StyleGAN2 models on different datasets.
|
| 336 |
+
|
| 337 |
+
<table><tr><td></td><td>UCF-101</td><td>FaceForensics</td><td>Sky Time-lapse</td><td>AFHQ-Dog</td></tr><tr><td>FID</td><td>45.63</td><td>10.99</td><td>10.80</td><td>7.85</td></tr></table>
|
| 338 |
+
|
| 339 |
+
Training Time. We train each image generator for UCF-101, FaceForensics, Sky Time-lapse, and AFHQ-Dog in less than 2 days using 8 Tesla V100 GPUs. For FFHQ, AnimeFaces, and LSUN-Church, we use the released models with no training cost. The training time for video generators ranges from $1.5 \sim 3$ days depending on the dataset (due to memory constraints, training for generating videos at a resolution of $1{,}024 \times 1{,}024$ was done on 8 Quadro RTX 8000 GPUs and took 5 days). The total training time for all the datasets is $1.5 \sim 5$ days, and the estimated cost for training on Google Cloud is $\$0.7\mathrm{K} \sim$ $\$2.3\mathrm{K}$ .
|
| 340 |
+
|
| 341 |
+
Implementation Details. We implement our experiments with PyTorch 1.3.1 and also tested them with PyTorch 1.6. We use the Adam optimizer (Kingma & Ba, 2014) with a learning rate of 0.0001 for $G_{\mathrm{M}}$ , $D_{\mathrm{V}}$ , and $D_{\mathrm{I}}$ in all experiments. In Eqn. 2, we set $\lambda = 0.5$ for conventional video generation tasks and use a smaller $\lambda = 0.2$ for cross-domain video generation, as it improves the content consistency. In Eqn. 7, we set $\lambda_{\mathrm{m}} = \lambda_{\mathrm{contr}} = \lambda_{\mathrm{f}} = 1$ . Grid searching on these hyper-parameters could potentially lead to a performance boost. For TGANv2, we use the released code<sup>4</sup> to train the models on UCF-101 and FaceForensics using 8 Tesla V100 with 16GB of GPU memory.
|
| 342 |
+
|
| 343 |
+
Video Prediction. For video prediction, we predict consecutive frames given the first frame $\mathbf{x}_1$ from a test video clip as the input. We find the inverse latent code $\hat{\mathbf{z}}_1$ for $\mathbf{x}_1$ by minimizing the following objective:
|
| 344 |
+
|
| 345 |
+
$$
\hat{\mathbf{z}}_1 = \underset{\hat{\mathbf{z}}_1}{\arg\min}\ \|\mathbf{x}_1 - G_{\mathrm{I}}(\hat{\mathbf{z}}_1)\|_2 + \lambda_{\mathrm{vgg}} \|F_{\mathrm{vgg}}(\mathbf{x}_1) - F_{\mathrm{vgg}}(G_{\mathrm{I}}(\hat{\mathbf{z}}_1))\|_2, \tag{8}
$$
|
| 348 |
+
|
| 349 |
+
where $\lambda_{\mathrm{vgg}}$ is the weight for the perceptual loss (Johnson et al., 2016) and $F_{\mathrm{vgg}}$ is the VGG feature extractor (Simonyan & Zisserman, 2014). We set $\lambda_{\mathrm{vgg}} = 1$ and optimize Eqn. 8 for 20,000 iterations. We take $\hat{\mathbf{z}}_1$ as the input to our model for video prediction.
|
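For illustration, the optimization in Eqn. 8 could be implemented as the following sketch; the optimizer choice and learning rate here are assumptions (the text only specifies $\lambda_{\mathrm{vgg}} = 1$ and 20,000 iterations).

```python
import torch

def invert_first_frame(G_I, F_vgg, x1, lam_vgg=1.0, iters=20000, lr=0.01):
    """Sketch of the inversion in Eqn. 8. G_I: image generator,
    F_vgg: VGG feature extractor, x1: target first frame (1, 3, H, W)."""
    z1 = torch.randn(1, 512, requires_grad=True)
    opt = torch.optim.Adam([z1], lr=lr)   # optimizer/lr are assumptions
    for _ in range(iters):
        opt.zero_grad()
        rec = G_I(z1)
        # Pixel L2 term plus VGG perceptual term, as in Eqn. 8.
        loss = (x1 - rec).norm() + lam_vgg * (F_vgg(x1) - F_vgg(rec)).norm()
        loss.backward()
        opt.step()
    return z1.detach()
```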
| 350 |
+
|
| 351 |
+
AMT Experiments. We present more details on the AMT experiments for different experimental settings and datasets. For each experiment, we run 5 iterations to get the averaged score.
|
| 352 |
+
|
| 353 |
+
- FaceForensics, Ours vs TGANv2. We randomly select 300 videos from each method and ask users to select the better one from a pair of videos.
|
| 354 |
+
|
| 355 |
+
- Sky Time-lapse, Ours vs DTVNet. We compare our method with DTVNet on the video prediction task. The testing set of the Sky Time-lapse dataset includes 2,815 short video clips. Considering that many of these clips share similar content and are sampled from 148 long videos, we select 148 short videos with different content for testing. For these videos, we perform inversion (Eqn. 8) on the first frame to get the latent code and generate videos. For DTVNet, we use the first frame directly as input to produce their results. We ask users to choose the one with better video quality from a pair of videos generated by our method and DTVNet. The results shown in Tab. 8 demonstrate the clear advantage of our approach.
|
| 356 |
+
|
| 357 |
+
Table 8: Human evaluation experiments on Sky Time-lapse dataset.
|
| 358 |
+
|
| 359 |
+
<table><tr><td>Method</td><td>Human Preference (%)</td></tr><tr><td>Ours / DTVNet</td><td>77.3 / 22.7</td></tr></table>
|
| 360 |
+
|
| 361 |
+
- $FFHQ$ , Full vs w/o $\mathcal{L}_{\mathrm{contr}}$ . We randomly sample 200 videos generated by each method and ask users to select the more realistic one from a pair of videos.
|
| 362 |
+
- $FFHQ$ , Full vs w/o $\mathcal{L}_{\mathrm{m}}$ . For each method, we use the same content code $\mathbf{z}_1$ to generate 9 videos with different motion trajectories, and organize them into a $3 \times 3$ grid. To conduct AMT experiments, we randomly generate $50 \times 3$ videos for each method and ask users to choose the one with higher motion diversity from a pair of videos.
|
| 363 |
+
|
| 364 |
+
Cross-Domain Video Generation. We provide more details on the image and video datasets.
|
| 365 |
+
|
| 366 |
+
- Image Datasets:
|
| 367 |
+
|
| 368 |
+
- $FFHQ$ (Karras et al., 2019) consists of 70,000 high-quality face images at $1024 \times 1024$ resolution with considerable variation in terms of age, ethnicity, and background.
|
| 369 |
+
- $AFHQ$-Dog (Choi et al., 2020) contains 5,239 high-quality dog images at $512 \times 512$ resolution with both training and testing sets.
|
| 370 |
+
- AnimeFaces (Branwen, 2019) includes 2,232,462 anime face images at $512 \times 512$ resolution.
|
| 371 |
+
- LSUN-Church (Yu et al., 2015) includes 126,227 in-the-wild church images at $256 \times 256$ resolution.
|
| 372 |
+
|
| 373 |
+
- Video Datasets:
|
| 374 |
+
|
| 375 |
+
- VoxCeleb (Nagrani et al., 2020) consists of 22,496 short clips of human speech, extracted from interview videos uploaded to YouTube.
|
| 376 |
+
- TLVDB (Shih et al., 2013) includes 463 time-lapse videos, covering a wide range of landscapes and cityscapes.
|
| 377 |
+
|
| 378 |
+
For the video datasets, we randomly select 32 consecutive frames from training videos and select every other frame to form a 16-frame sequence for training.
|
| 379 |
+
|
| 380 |
+
# C MORE VIDEO RESULTS
|
| 381 |
+
|
| 382 |
+
In this section, we provide more qualitative video results generated by our approach. We show the thumbnail from each video in the figures. Full resolution videos are in the supplementary material. We also provide an HTML page to visualize these videos.
|
| 383 |
+
|
| 384 |
+
UCF-101. In Fig. 9, we show videos generated by our approach on the UCF-101 dataset.
|
| 385 |
+
|
| 386 |
+
FaceForensics. In Fig. 10, we show the generated videos for FaceForensics. In Fig. 11 and Fig. 12, we show that our approach can generate long consecutive results, 32 and 64 frames respectively,
|
| 387 |
+
|
| 388 |
+
even when trained with 16-frame clips. In Fig. 13, we demonstrate that our approach can generate diverse motion patterns using the same content code. In Fig. 14, we apply the same motion codes with different content to get the synthesized videos.
|
| 389 |
+
|
| 390 |
+
Sky Time-lapse. Fig. 15 shows the generated videos for the Sky Time-lapse dataset.
|
| 391 |
+
|
| 392 |
+
(FFHQ, VoxCeleb). Fig. 16, Fig. 17, and Fig. 18 present the generated videos that have motion patterns from VoxCeleb and content from FFHQ, with resolutions of $128 \times 128$ , $256 \times 256$ , and $1024 \times 1024$ , respectively. We use BigGAN as the generator for Fig. 16 and StyleGAN2 for Fig. 17 and Fig. 18.
|
| 393 |
+
|
| 394 |
+
(AFHQ-Dog, VoxCeleb). Fig. 19 presents the generated videos that have motion patterns from VoxCeleb and content from AFHQ-Dog. The videos have a resolution of $512 \times 512$ . In Fig. 20, we show the interpolation between every two frames to get longer sequences.
|
| 395 |
+
|
| 396 |
+
(AnimeFaces, VoxCeleb). Fig. 21 shows the generated videos that have motion patterns from VoxCeleb and content from AnimeFaces. The videos have a resolution of $512 \times 512$ .
|
| 397 |
+
|
| 398 |
+
(LSUN-Church, TLVDB). Fig. 22 presents the generated videos that have time-lapse changing style from TLVDB and content from LSUN-Church.
|
| 399 |
+
|
| 400 |
+

|
| 401 |
+
Figure 9: Example videos generated by our approach on the UCF-101 dataset.
|
| 402 |
+
|
| 403 |
+

|
| 404 |
+
Figure 10: Example videos generated by our approach on the FaceForensics dataset.
|
| 405 |
+
|
| 406 |
+

|
| 407 |
+
Figure 11: The generated videos on the FaceForensics dataset consisting of 32 frames.
|
| 408 |
+
|
| 409 |
+

|
| 410 |
+
Figure 12: The generated videos on the FaceForensics dataset consisting of 64 frames.
|
| 411 |
+
|
| 412 |
+

|
| 413 |
+
Figure 13: Each row is synthesized using the same content code to generate diverse motion patterns. Please see the corresponding supplementary video for a better illustration.
|
| 414 |
+
|
| 415 |
+

|
| 416 |
+
Figure 14: Each row is synthesized with the same motion trajectory but different content codes. Please see the corresponding supplementary video for a better illustration.
|
| 417 |
+
|
| 418 |
+

|
| 419 |
+
Figure 15: Example videos generated by our approach on the Sky Time-lapse dataset. The videos have a resolution of $128 \times 128$ .
|
| 420 |
+
|
| 421 |
+

|
| 422 |
+
Figure 16: Cross-domain video generation for (FFHQ, Vox). The videos have a resolution of $128 \times 128$ .
|
| 423 |
+
|
| 424 |
+

|
| 425 |
+
Figure 17: Cross-domain video generation for (FFHQ, Vox). The videos have a resolution of $256 \times 256$ .
|
| 426 |
+
|
| 427 |
+

|
| 428 |
+
Figure 18: Cross-domain video generation for (FFHQ, Vox). The videos have a resolution of $1024 \times 1024$ .
|
| 429 |
+
|
| 430 |
+

|
| 431 |
+
Figure 19: Cross-domain video generation for (AFHQ-Dog, Vox). The videos have a resolution of $512 \times 512$ .
|
| 432 |
+
|
| 433 |
+

|
| 434 |
+
Figure 20: Cross-domain video generation for (AFHQ-Dog, Vox). We interpolate every two frames to get 32 sequential frames. The videos have a resolution of $512 \times 512$ .
|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
Figure 21: Cross-domain video generation for (AnimeFaces, Vox). The videos have a resolution of $512 \times 512$ .
|
| 438 |
+
|
| 439 |
+

|
| 440 |
+
Figure 22: Cross-domain video generation for (LSUN-Church, TLVDB). The videos have a resolution of $256 \times 256$ .
|
| 441 |
+
|
| 442 |
+
# D MORE ABLATION ANALYSIS FOR MUTUAL INFORMATION LOSS $\mathcal{L}_{\mathrm{m}}$
|
| 443 |
+
|
| 444 |
+
In addition to Tab. 5, we perform another ablation experiment to show how the mutual information loss $\mathcal{L}_{\mathrm{m}}$ improves motion diversity, using the following setting. We randomly sample a content code $z_{1} \in \mathcal{Z}$ and use it as input to synthesize 100 videos, each containing 16 frames. We average the 100 generated videos (they share the same first frame) to get one mean-video of 16 frames; for example, the last frame of the mean-video is obtained by averaging the last frames of all 100 generated videos. We also calculate the per-pixel standard deviation (std) for each averaged frame in the mean-video. Blurrier frames and higher per-pixel std indicate that the 100 synthesized videos contain more diverse motion.
|
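The mean-video statistic can be computed in a few lines, as sketched below; `generate_video` is a hypothetical handle to the full generation pipeline.

```python
import torch

@torch.no_grad()
def motion_diversity(generate_video, z1, n_videos=100):
    """Sketch: mean-video and per-pixel std over 100 videos sharing the
    content code z1. `generate_video(z1)` -> (16, C, H, W) is assumed."""
    vids = torch.stack([generate_video(z1) for _ in range(n_videos)])
    mean_video = vids.mean(dim=0)     # (16, C, H, W); blurrier = more motion
    per_pixel_std = vids.std(dim=0)   # std across the 100 videos
    # Statistic reported for the 16th (last) frame:
    return mean_video, per_pixel_std[-1].mean()
```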
| 445 |
+
|
| 446 |
+
We evaluate the Full and $w/o\ \mathcal{L}_{\mathrm{m}}$ (without the mutual information loss) settings by running the above experiment 50 times, i.e., sampling $z_{1}$ 50 times. Across the 50 trials, for the Full model, the mean and std of the per-pixel std for the $16^{th}$ frame (the last frame in a generated video) is $0.233 \pm 0.036$ , significantly higher than that of the $w/o\ \mathcal{L}_{\mathrm{m}}$ model $(0.126 \pm 0.025)$ . In Fig. 23, we show 8 examples of the last frame of the mean-video together with the per-pixel std images (see the supplementary material for the full videos). Our Full model produces more diverse motion, as the averaged frame is blurrier and the per-pixel std is higher. Note that StyleGAN2 supports noise inputs for extra randomness; we disable them in this ablation study.
|
| 447 |
+
|
| 448 |
+

|
| 449 |
+
Figure 23: Row 1 and 3: The last frame of the mean-video and per-pixel std of $w / o$ $\mathcal{L}_{\mathrm{m}}$ model. Row 2 and 4: The last frame of the mean-video and per-pixel std of the Full model. The Full model has a more blurry mean-video and higher per-pixel std, which indicates more diverse motion.
|
| 450 |
+
|
| 451 |
+
# E LIMITATIONS
|
| 452 |
+
|
| 453 |
+
Our framework requires a well-trained image generator for frame synthesis. In order to synthesize high-quality and temporally coherent videos, an ideal image generator should satisfy two requirements: R1. The image generator should synthesize high-quality images; otherwise the video discriminator can easily tell generated videos apart from real ones, as the image quality differs. R2. The image generator should be able to generate diverse image content that covers enough motion modes for sequence modeling.
|
| 454 |
+
|
| 455 |
+
Example of R1. UCF-101 is a challenging dataset even for training an image generator. In Tab. 7, the StyleGAN2 model trained on UCF-101 has an FID of 45.63, which is much worse than the others. We hypothesize that this is because the UCF-101 dataset has many categories, but each category includes a relatively small number of videos that share very similar content. A similar observation is discussed in DVD-GAN (Clark et al., 2019). Although we can achieve
|
| 456 |
+
|
| 457 |
+
state-of-the-art performance on the UCF-101 dataset, the quality of the generated videos is not as good as on the other datasets (Fig. 9), and the synthesized videos are still not close to real videos.
|
| 458 |
+
|
| 459 |
+
Example of R2. We test our method on the BAIR Robot Pushing dataset (Ebert et al., 2017). We train a $64 \times 64$ StyleGAN2 image generator using frames from the BAIR videos. The image generator achieves an FID of 6.12. Based on this image generator, we train a video generation model that synthesizes 16 frames. An example synthesized video is shown in Fig. 24 (more videos are in the supplementary materials). Our method successfully models shadow changes and the moving robot arm, but it struggles to decouple the robot arm from some small objects in the background; we analyze this below.
|
| 460 |
+
|
| 461 |
+

|
| 462 |
+
Figure 24: A synthesized video using BAIR dataset. Note the background changing of the first frame (upper-left) and the last frame (bottom-right).
|
| 463 |
+
|
| 464 |
+
# E.1 ANALYSIS OF THE INFORMATION CONTAINED IN PCA COMPONENTS
|
| 465 |
+
|
| 466 |
+
Inspired by previous work (Härkönen et al., 2020), we further investigate the latent space of the image generator by considering the information contained in each PCA component. Fig. 25 shows the percentage of total variance captured by the top PCA components. The image generator trained on BAIR compresses most of the information into a few components. Specifically, the top 20 PCA components capture $85\%$ of the variance. In contrast, the latent space of the image generator trained on FFHQ (and FFHQ 1024 for high-resolution image synthesis) requires 100 PCA components to capture $85\%$ of the variance. This implies that the BAIR generator models the dataset in a low-dimensional space, which makes it harder to fully disentangle all the objects in images for manipulation.
|
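The variance fractions in Fig. 25 correspond to a standard PCA computation on sampled latent codes, sketched below (assuming a recent PyTorch with `torch.linalg`).

```python
import torch

def explained_variance(W, k=20):
    """Sketch: fraction of total variance captured by the top-k PCA
    components of sampled latent codes W (n_samples, 512)."""
    Wc = W - W.mean(dim=0, keepdim=True)
    s = torch.linalg.svdvals(Wc)   # singular values, descending
    var = s.pow(2)                 # per-component variance (unnormalized)
    return (var[:k].sum() / var.sum()).item()   # e.g. ~0.85 for BAIR at k=20
```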
| 467 |
+
|
| 468 |
+

|
| 469 |
+
Figure 25: Percentage of variations captured by top PCA components on different models.
|
| 470 |
+
|
| 471 |
+
Moreover, we visualize the video synthesis results obtained by moving along the top 20 PCA components. Let $V_{i}$ denote the $i^{th}$ PCA component. Given a content code $z_{1}$ , we synthesize a 5-frame video clip using the following sequence as input: $\{z_{1} - 2V_{i}, z_{1} - V_{i}, z_{1}, z_{1} + V_{i}, z_{1} + 2V_{i}\}$ . In Fig. 26, we show the video synthesis results of moving along the top 20 PCA directions. It can be seen that: 1) changing the later components (the $8^{th}$ and later rows) of BAIR makes only small changes; 2) the first 7 components of BAIR have entangled semantic meaning, while the components of FFHQ are more disentangled ( $2^{nd}$ row, rotation; $20^{th}$ row, smile). This indicates that the image generator of BAIR may not cover enough (disentangled) motion modes, making it hard for the motion generator to fully disentangle content and motion with only a few dominating PCA components, whereas for the image generator trained on FFHQ, disentangling foreground and background is much easier.
|
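The traversal itself is straightforward; a sketch (with `G_I` denoting the image generator) is:

```python
import torch

@torch.no_grad()
def traverse_component(G_I, z1, V, i):
    """Sketch: render a 5-frame clip by moving z1 along the i-th
    PCA direction V[i], i.e. {z1 - 2V_i, ..., z1 + 2V_i}."""
    frames = [G_I(z1 + a * V[i]) for a in (-2, -1, 0, 1, 2)]
    return torch.stack(frames)   # (5, C, H, W)
```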
| 472 |
+
|
| 473 |
+

|
| 474 |
+
Figure 26: Visualization of the top 20 principal components of BAIR (left) and FFHQ (right).
|
| 475 |
+
|
| 476 |
+

|
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1100deca6dc72c808edd6d96e09ee7ca8e95b2c73f872fd51fa1c770d8d15430
|
| 3 |
+
size 2003668
|
agoodimagegeneratoriswhatyouneedforhighresolutionvideosynthesis/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2b3d083a6557f153378693d17ebb8d1fcf80972463e0d1a12672ceb0ccd62e6
|
| 3 |
+
size 750908
|
gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:786346db5f3cfb7c2ef3a6fff0d0106f85203f70c133e747187af74f48fedcf8
|
| 3 |
+
size 149842
|
gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:41c2339e44a575d2d8763d15f957ec97f4df1c401fb3f2a182d77ae67d33e61e
|
| 3 |
+
size 193072
|
gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/d8d01270-8309-43a6-8c79-c5c66d4208a3_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b6a24bd2bfd068ad3975a5794f1178bc8834661c350e96386ba2d73dcb10a2c
|
| 3 |
+
size 3866499
|
gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/full.md
ADDED
|
@@ -0,0 +1,599 @@
|
| 1 |
+
# $\nabla$ Sim: DIFFERENTIABLE SIMULATION FOR SYSTEM IDENTIFICATION AND VISUOMOTOR CONTROL
|
| 2 |
+
|
| 3 |
+
https://gradsim.github.io
|
| 4 |
+
|
| 5 |
+
Krishna Murthy Jatavallabhula\*1,3,4, Miles Macklin\*2, Florian Golemo1,3, Vikram Voleti3,4, Linda Petrini3, Martin Weiss3,4, Breandan Considine3,5, Jérôme Parent-Lévesque3,5, Kevin Xie2,6,7, Kenny Erleben8, Liam Paull1,3,4, Florian Shkurti6,7, Derek Nowrouzezahrai3,5, and Sanja Fidler2,6,7
|
| 6 |
+
|
| 7 |
+
<sup>1</sup>Montreal Robotics and Embodied AI Lab, <sup>2</sup>NVIDIA, <sup>3</sup>Mila, <sup>4</sup>Université de Montréal, <sup>5</sup>McGill, <sup>6</sup>University of Toronto, <sup>7</sup>Vector Institute, <sup>8</sup>University of Copenhagen
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
Figure 1: $\nabla Sim$ is a unified differentiable rendering and multiphysics framework that allows solving a range of control and parameter estimation tasks (rigid bodies, deformable solids, and cloth) directly from images/video.
|
| 11 |
+
|
| 12 |
+

|
| 13 |
+
|
| 14 |
+

|
| 15 |
+
|
| 16 |
+

|
| 17 |
+
|
| 18 |
+
# ABSTRACT
|
| 19 |
+
|
| 20 |
+
We consider the problem of estimating an object's physical properties such as mass, friction, and elasticity directly from video sequences. Such a system identification problem is fundamentally ill-posed due to the loss of information during image formation. Current solutions require precise 3D labels which are labor-intensive to gather, and infeasible to create for many systems such as deformable solids or cloth. We present $\nabla Sim$ , a framework that overcomes the dependence on 3D supervision by leveraging differentiable multiphysics simulation and differentiable rendering to jointly model the evolution of scene dynamics and image formation. This novel combination enables backpropagation from pixels in a video sequence through to the underlying physical attributes that generated them. Moreover, our unified computation graph - spanning from the dynamics and through the rendering process - enables learning in challenging visuomotor control tasks, without relying on state-based (3D) supervision, while obtaining performance competitive to or better than techniques that rely on precise 3D labels.
|
| 21 |
+
|
| 22 |
+
# 1 INTRODUCTION
|
| 23 |
+
|
| 24 |
+
Accurately predicting the dynamics and physical characteristics of objects from image sequences is a long-standing challenge in computer vision. This end-to-end reasoning task requires a fundamental understanding of both the underlying scene dynamics and the imaging process. Imagine watching a short video of a basketball bouncing off the ground and ask: "Can we infer the mass and elasticity of the ball, predict its trajectory, and make informed decisions, e.g., how to pass and shoot?" These seemingly simple questions are extremely challenging to answer even for modern computer vision models. The underlying physical attributes of objects and the system dynamics need to be modeled and estimated, all while accounting for the loss of information during 3D to 2D image formation.
|
| 25 |
+
|
| 26 |
+
Depending on the assumptions on the scene structure and dynamics, three types of solutions exist: black, grey, or white box. Black box methods (Watters et al., 2017; Xu et al., 2019b; Janner et al., 2019; Chang et al., 2016) model the state of a dynamical system (such as the basketball's trajectory in time) as a learned embedding of its states or observations. These methods require few prior assumptions about the system itself, but lack interpretability due to entangled variational factors (Chen et al., 2016) or due to the ambiguities in unsupervised learning (Greydanus et al., 2019; Cranmer et al., 2020b). Recently, grey box methods (Mehta et al., 2020) leveraged partial knowledge about the system dynamics to improve performance. In contrast, white box methods (Degrave et al., 2016; Liang et al., 2019; Hu et al., 2020; Qiao et al., 2020) impose prior knowledge by employing explicit dynamics models, reducing the space of learnable parameters and improving system interpretability.
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
Figure 2: $\nabla Sim$ : Given video observations of an evolving physical system (e), we randomly initialize scene object properties (a) and evolve them over time using a differentiable physics engine (b), which generates states. Our renderer (c) processes states, object vertices and global rendering parameters to produce image frames for computing our loss. We backprop through this computation graph to estimate physical attributes and controls. Existing methods rely solely on differentiable physics engines and require supervision in state-space (f), while $\nabla Sim$ only needs image-space supervision (g).
|
| 30 |
+
|
| 31 |
+
Most notably in our context, all of these approaches require precise 3D labels - which are labor-intensive to gather, and infeasible to generate for many systems such as deformable solids or cloth.
|
| 32 |
+
|
| 33 |
+
# We eliminate the dependence of white box dynamics methods on 3D supervision by coupling explicit (and differentiable) models of scene dynamics with image formation (rendering).
|
| 34 |
+
|
| 35 |
+
Explicitly modeling the end-to-end dynamics and image formation underlying video observations is challenging, even with access to the full system state. This problem has been treated in the vision, graphics, and physics communities (Pharr et al., 2016; Macklin et al., 2014), leading to the development of robust forward simulation models and algorithms. These simulators are not readily usable for solving inverse problems, due in part to their non-differentiability. As such, applications of black-box forward processes often require surrogate gradient estimators such as finite differences or REINFORCE (Williams, 1992) to enable any learning. Likelihood-free inference for black-box forward simulators (Ramos et al., 2019; Cranmer et al., 2020a; Kulkarni et al., 2015; Yildirim et al., 2017; 2015; 2020; Wu et al., 2017b) has led to some improvements here, but remains limited in terms of data efficiency and scalability to high dimensional parameter spaces. Recent progress in differentiable simulation further improves the learning dynamics, however we still lack a method for end-to-end differentiation through the entire simulation process (i.e., from video pixels to physical attributes), a prerequisite for effective learning from video frames alone.
|
| 36 |
+
|
| 37 |
+
We present $\nabla Sim$ , a versatile end-to-end differentiable simulator that adopts a holistic, unified view of differentiable dynamics and image formation (cf. Fig. 1, 2). Existing differentiable physics engines only model time-varying dynamics and require supervision in state space (usually 3D tracking). We additionally model a differentiable image formation process, thus only requiring target information specified in image space. This enables us to backpropagate (Griewank & Walther, 2003) training signals from video pixels all the way to the underlying physical and dynamical attributes of a scene.
|
| 38 |
+
|
| 39 |
+
# Our main contributions are:
|
| 40 |
+
|
| 41 |
+
- $\nabla Sim$ , a differentiable simulator that demonstrates the ability to backprop from video pixels to the underlying physical attributes (cf. Fig. 2).
|
| 42 |
+
- We demonstrate recovering many physical properties exclusively from video observations, including friction, elasticity, deformable material parameters, and visuomotor controls (sans 3D supervision)
|
| 43 |
+
- A PyTorch framework facilitating interoperability with existing machine learning modules.
|
| 44 |
+
|
| 45 |
+
We evaluate $\nabla Sim$ 's effectiveness on parameter identification tasks for rigid, deformable and thin-shell bodies, and demonstrate performance that is competitive, or in some cases superior, to current physics-only differentiable simulators. Additionally, we demonstrate the effectiveness of the gradients provided by $\nabla Sim$ on challenging visuomotor control tasks involving deformable solids and cloth.
|
| 46 |
+
|
| 47 |
+
# 2 RELATED WORK
|
| 48 |
+
|
| 49 |
+
Differentiable physics simulators have seen significant attention and activity, with efforts centered around embedding physics structure into autodifferentiation frameworks. This has enabled differentiation through contact and friction models (Toussaint et al., 2018; de Avila Belbute-Peres et al.,
|
| 50 |
+
|
| 51 |
+
2018; Song & Boularias, 2020b;a; Degrave et al., 2016; Wu et al., 2017a; Research, 2020 (accessed May 15, 2020)), latent state models (Guen & Thome, 2020; Schenck & Fox, 2018; Jaques et al., 2020; Heiden et al., 2019), volumetric soft bodies (Hu et al., 2019; Liang et al., 2019; Hu et al., 2020), as well as particle dynamics (Schenck & Fox, 2018; Li et al., 2019; 2020; Hu et al., 2020). In contrast, $\nabla Sim$ addresses a superset of simulation scenarios, by coupling the physics simulator with a differentiable rendering pipeline. It also supports tetrahedral FEM-based hyperelasticity models to simulate deformable solids and thin-shells.
|
| 52 |
+
|
| 53 |
+
Recent work on physics-based deep learning injects structure in the latent space of the dynamics using Lagrangian and Hamiltonian operators (Greydanus et al., 2019; Chen et al., 2020; Toth et al., 2020; Sanchez-Gonzalez et al., 2019; Cranmer et al., 2020b; Zhong et al., 2020), by explicitly conserving physical quantities, or with ground truth supervision (Asenov et al., 2019; Wu et al., 2016; Xu et al., 2019b).
|
| 54 |
+
|
| 55 |
+
Sensor readings have been used to predict the effects of forces applied to an object in models of learned (Fragkiadaki et al., 2016; Byravan & Fox, 2017) and intuitive physics (Ehsani et al., 2020; Mottaghi et al., 2015; 2016; Gupta et al., 2010; Ehrhardt et al., 2018; Yu et al., 2015; Battaglia et al., 2013; Mann et al., 1997; Innamorati et al., 2019; Standley et al., 2017). This also includes approaches that learn to model multi-object interactions (Watters et al., 2017; Xu et al., 2019b; Janner et al., 2019; Ehrhardt et al., 2017; Chang et al., 2016; Agrawal et al., 2016). In many cases, intuitive physics approaches are limited in their prediction horizon and treatment of complex scenes, as they do not model the 3D geometry or the object properties sufficiently accurately. System identification based on parameterized physics models (Salzmann & Urtasun, 2011; Brubaker et al., 2010; Kozlowski, 1998; Wensing et al., 2018; Brubaker et al., 2009; Bhat et al., 2003; 2002; Liu et al., 2005; Grzeszczuk et al., 1998; Sutanto et al., 2020; Wang et al., 2020; 2018a) and inverse simulation (Murray-Smith, 2000) are closely related areas.
|
| 56 |
+
|
| 57 |
+
There is a rich literature on neural image synthesis, but we focus on methods that model the 3D scene structure, including voxels (Henzler et al., 2019; Paschalidou et al., 2019; Smith et al., 2018b; Nguyen-Phuoc et al., 2018), meshes (Smith et al., 2020; Wang et al., 2018b; Groueix et al., 2018; Alhaija et al., 2018), and implicit shapes (Xu et al., 2019a; Chen & Zhang, 2019; Michalkiewicz et al., 2019; Niemeyer et al., 2020; Park et al., 2019; Mescheder et al., 2019). Generative models condition the rendering process on samples of the 3D geometry (Liao et al., 2019). Latent factors determining 3D structure have also been learned in generative models (Chen et al., 2016; Eslami et al., 2018). Additionally, implicit neural representations that leverage differentiable rendering have been proposed (Mildenhall et al., 2020; 2019) for realistic view synthesis. Many of these representations have become easy to manipulate through software frameworks like Kaolin (Jatavallabhula et al., 2019), Open3D (Zhou et al., 2018), and PyTorch3D (Ravi et al., 2020).
|
| 58 |
+
|
| 59 |
+
Differentiable rendering allows for image gradients to be computed w.r.t. the scene geometry, camera, and lighting inputs. Variants based on the rasterization paradigm (NMR (Kato et al., 2018), OpenDR (Loper & Black, 2014), SoftRas (Liu et al., 2019)) blur the edges of scene triangles prior to image projection to remove discontinuities in the rendering signal. DIB-R (Chen et al., 2019) applies this idea to background pixels and proposes an interpolation-based rasterizer for foreground pixels. More sophisticated differentiable renderers can treat physics-based light transport processes (Li et al., 2018; Nimier-David et al., 2019) by ray tracing, and more readily support higher-order effects such as shadows, secondary light bounces, and global illumination.
|
| 60 |
+
|
| 61 |
+
# 3 $\nabla$ Sim: A UNIFIED DIFFERENTIABLE SIMULATION ENGINE
|
| 62 |
+
|
| 63 |
+
Typically, physics estimation and rendering have been treated as disjoint, mutually exclusive tasks. In this work, we take a unified view of simulation in general, to compose physics estimation and rendering. Formally, simulation is a function $\operatorname{Sim}:\mathbb{R}^P\times [0,1]\to \mathbb{R}^{H\times W}$ ; $\operatorname{Sim}(\mathbf{p},t) = \mathcal{I}$ . Here $\mathbf{p}\in \mathbb{R}^{P}$ is a vector representing the simulation state and parameters (objects, their physical properties, their geometries, etc.), and $t$ denotes the time of simulation (conveniently reparameterized to be in the interval $[0, 1]$ ). Given initial conditions $\mathbf{p}_0$ , the simulation function produces an image $\mathcal{I}$ of height $H$ and width $W$ at each timestep $t$ . If this function Sim were differentiable, then the gradient of $\operatorname{Sim}(\mathbf{p},t)$ with respect to the simulation parameters $\mathbf{p}$ would provide the change in the output of the simulation from $\mathcal{I}$ to $\mathcal{I} + \nabla \operatorname{Sim}(\mathbf{p},t)\delta \mathbf{p}$ due to an infinitesimal perturbation of $\mathbf{p}$ by $\delta \mathbf{p}$ . This construct enables a gradient-based optimizer to estimate physical parameters from video, by defining a loss function over the image space $\mathcal{L}(\mathcal{I},.)$ , and descending this loss landscape along a
|
| 64 |
+
|
| 65 |
+
direction parallel to $-\nabla \mathrm{Sim}(.)$ . To realise this, we turn to the paradigms of computational graphs and differentiable programming.
|
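In code, this pixels-to-parameters descent could be sketched as follows; `sim` stands for the differentiable Sim function above, and the optimizer settings and pixelwise MSE loss are illustrative choices rather than a prescribed recipe.

```python
import torch

def estimate_parameters(sim, video, p0, lr=0.1, iters=100):
    """Sketch: gradient descent on physical parameters p through Sim(p, t).
    video: (T, H, W) target frames; optimizer settings are assumptions."""
    p = p0.clone().requires_grad_(True)
    opt = torch.optim.Adam([p], lr=lr)
    ts = torch.linspace(0, 1, video.size(0))
    for _ in range(iters):
        opt.zero_grad()
        rendered = torch.stack([sim(p, t) for t in ts])
        loss = (rendered - video).pow(2).mean()   # image-space loss L(I, .)
        loss.backward()    # gradients flow through rendering and physics
        opt.step()
    return p.detach()
```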
| 66 |
+
|
| 67 |
+
$\nabla$ Sim comprises two main components: a differentiable physics engine that computes the physical states of the scene at each time instant, and a differentiable renderer that renders the scene to a 2D image. Contrary to existing differentiable physics (Toussaint et al., 2018; de Avila Belbute-Peres et al., 2018; Song & Boularias, 2020b;a; Degrave et al., 2016; Wu et al., 2017a; Research, 2020 (accessed May 15, 2020); Hu et al., 2020; Qiao et al., 2020) or differentiable rendering (Loper & Black, 2014; Kato et al., 2018; Liu et al., 2019; Chen et al., 2019) approaches, we adopt a holistic view and construct a computational graph spanning them both.
|
| 68 |
+
|
| 69 |
+
# 3.1 DIFFERENTIABLE PHYSICS ENGINE
|
| 70 |
+
|
| 71 |
+
Under Lagrangian mechanics, the state of a physical system can be described in terms of generalized coordinates $\mathbf{q}$ , generalized velocities $\dot{\mathbf{q}} = \mathbf{u}$ , and design/model parameters $\theta$ . For the purpose of exposition, we make no distinction between rigid bodies, or deformable solids, or thin-shell models of cloth, etc. Although the specific choices of coordinates and parameters vary, the simulation procedure is virtually unchanged. We denote the combined state vector by $\mathbf{s}(t) = [\mathbf{q}(t), \mathbf{u}(t)]$ .
|
| 72 |
+
|
| 73 |
+
The dynamic evolution of the system is governed by second-order differential equations (ODEs) of the form $\mathbf{M}(\mathbf{s},\theta)\dot{\mathbf{s}} = \mathbf{f}(\mathbf{s},\theta)$ , where $\mathbf{M}$ is a mass matrix that depends on the state and parameters. The forces on the system may be parameterized by design parameters (e.g., Young's modulus). Solutions to these ODEs may be obtained through black-box numerical integration methods, and their derivatives calculated through the continuous adjoint method (Chen et al., 2018). However, we instead consider our physics engine as a differentiable operation that provides an implicit relationship between a state vector $\mathbf{s}^{-} = \mathbf{s}(t)$ at the start of a time step and the updated state $\mathbf{s}^{+} = \mathbf{s}(t + \Delta t)$ at the end of the time step. An arbitrary discrete-time integration scheme can then be abstracted as the function $\mathbf{g}(\mathbf{s}^{-},\mathbf{s}^{+},\theta) = \mathbf{0}$ , relating the initial and final system states and the model parameters $\theta$ (one concrete scheme is sketched below).
|
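As one concrete instance of such a scheme, a semi-implicit (symplectic) Euler step can be written directly in an autodiff framework; the sketch below is illustrative and is not $\nabla Sim$'s actual integrator.

```python
import torch

def semi_implicit_euler(q, u, f, M_inv, dt, theta):
    """Sketch of one differentiable time step, a concrete instance of
    g(s-, s+, theta) = 0. q, u: generalized coordinates/velocities;
    f: force function; M_inv: inverse mass matrix."""
    u_next = u + dt * (M_inv @ f(q, u, theta))  # velocity update from forces
    q_next = q + dt * u_next                    # positions use the new velocity
    return q_next, u_next   # autodiff tracks d(s+)/d(s-, theta) through the step
```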
| 74 |
+
|
| 75 |
+
Gradients through this dynamical system can be computed by graph-based autodiff frameworks (Paszke et al., 2019; Abadi et al., 2015; Bradbury et al., 2018), or by program transformation approaches (Hu et al., 2020; van Merrienboer et al., 2018). Our framework is agnostic to the specifics of the differentiable physics engine, however in Appendices A through D we detail an efficient approach based on the source-code transformation of parallel kernels, similar to DiffTaichi (Hu et al., 2020). In addition, we describe extensions to this framework to support mesh-based tetrahedral finite-element models (FEMs) for deformable and thin-shell solids. This is important since we require surface meshes to perform differentiable rasterization as described in the following section.
|
| 76 |
+
|
| 77 |
+
# 3.2 DIFFERENTIABLE RENDERING ENGINE
|
| 78 |
+
|
| 79 |
+
A renderer expects scene description inputs and generates color image outputs, all according to a sequence of image formation stages defined by the forward graphics pipeline. The scene description includes a complete geometric descriptor of scene elements, their associated material/reflectance properties, light source definitions, and virtual camera parameters. The rendering process is not generally differentiable, as visibility and occlusion events introduce discontinuities. Most interactive renderers, such as those used in real-time applications, employ a rasterization process to project 3D geometric primitives onto 2D pixel coordinates, resolving these visibility events with non-differentiable operations.
|
| 80 |
+
|
| 81 |
+
Our experiments employ two differentiable alternatives to traditional rasterization, SoftRas (Liu et al., 2019) and DIB-R (Chen et al., 2019), both of which replace discontinuous triangle mesh edges with smooth sigmoids. This has the effect of blurring triangle edges into semi-transparent boundaries, thereby removing the non-differentiable discontinuity of traditional rasterization. DIB-R distinguishes between foreground pixels (associated to the principal object being rendered in the scene) and background pixels (for all other objects, if any). The latter are rendered using the same technique as SoftRas while the former are rendered by bilinearly sampling a texture using differentiable UV coordinates.
|
| 82 |
+
|
| 83 |
+
$\nabla Sim$ performs differentiable physics simulation and rendering at independent and adjustable rates, allowing us to trade computation for accuracy by rendering fewer frames than dynamics updates.
|
| 84 |
+
|
| 85 |
+
# 4 EXPERIMENTS
|
| 86 |
+
|
| 87 |
+
We conducted multiple experiments to test the efficacy of $\nabla Sim$ on physical parameter identification from video and visuomotor control, to address the following questions:
|
| 88 |
+
|
| 89 |
+
- Can we accurately identify physical parameters by backpropagating from video pixels, through the simulator? (Ans: Yes, very accurately, cf. 4.1)
|
| 90 |
+
- What is the performance gap associated with using $\nabla Sim$ (2D supervision) vs. differentiable physics-only engines (3D supervision)? (Ans: $\nabla Sim$ is competitive/superior, cf. Tables 1, 2, 3)
|
| 91 |
+
- How do loss landscapes differ across differentiable simulators ( $\nabla \operatorname{Sim}$ ) and their non-differentiable counterparts? (Ans: Loss landscapes for $\nabla \operatorname{Sim}$ are smooth, cf. 4.1.3)
|
| 92 |
+
- Can we use $\nabla Sim$ for visuomotor control tasks? (Ans: Yes, without any 3D supervision, cf. 4.2)
|
| 93 |
+
- How sensitive is $\nabla Sim$ to modeling assumptions at system level? (Ans: Moderately, cf. Table 4)
|
| 94 |
+
|
| 95 |
+
Each of our experiments comprises an environment $\mathcal{E}$ that applies a particular set of physical forces and/or constraints, a (differentiable) loss function $\mathcal{L}$ that implicitly specifies an objective, and an initial guess $\theta_0$ of the physical state of the simulation. The goal is to recover optimal physics parameters $\theta^*$ that minimize $\mathcal{L}$ , by backpropagating through the simulator.
|
| 96 |
+
|
| 97 |
+
# 4.1 PHYSICAL PARAMETER ESTIMATION FROM VIDEO
|
| 98 |
+
|
| 99 |
+
First, we assess the capabilities of $\nabla Sim$ to accurately identify a variety of physical attributes such as mass, friction, and elasticity from image/video observations. To the best of our knowledge, $\nabla Sim$ is the first study to jointly infer such fine-grained parameters from video observations. We also implement a set of competitive baselines that use strictly more information on the task.
|
| 100 |
+
|
| 101 |
+
# 4.1.1 RIGID BODIES (RIGID)
|
| 102 |
+
|
| 103 |
+
Our first environment, rigid, evaluates the accuracy of estimating physical and material attributes of rigid objects from videos. We curate a dataset of 10000 simulated videos generated from variations of 14 objects, comprising primitive shapes such as boxes, cones, and cylinders, as well as non-convex shapes from ShapeNet (Chang et al., 2015) and DexNet (Mahler et al., 2017). With uniformly sampled initial dimensions, poses, velocities, and physical properties (density, elasticity, and friction parameters), we apply a known impulse to the object and record a video of the resultant trajectory. Inference with $\nabla Sim$ is done by guessing an initial mass (uniformly random in the range $[2,12]\,kg/m^3$ ), unrolling a differentiable simulation using this guess, comparing the rendered video with the true video (pixelwise mean-squared error - MSE), and performing gradient descent updates. We refer the interested reader to the appendix (Sec. G) for more details.
|
| 104 |
+
|
| 105 |
+
Table 1 shows the results for predicting the mass of an object from video, with a known impulse applied
|
| 106 |
+
|
| 107 |
+

|
| 108 |
+
Figure 3: Parameter Estimation: For deformable experiments, we optimize the material properties of a beam to match a video of a beam hanging under gravity. In the rigid experiments, we estimate contact parameters (elasticity/friction) and object density to match a video (GT). We visualize entire time sequences (t) with color-coded blends.
|
| 109 |
+
|
| 110 |
+
to it. We use EfficientNet (B0) (Tan & Le, 2019) and resize input frames to $64 \times 64$ . Feature maps at a resolution of $4 \times 4 \times 32$ are concatenated for all frames and fed to an MLP with 4 linear layers, trained with an MSE loss. We compare $\nabla Sim$ with three other baselines: PyBullet + REINFORCE (Ehsani et al., 2020; Wu et al., 2015), diff. physics only (requiring 3D supervision), and a ConvLSTM baseline adopted from Xu et al. (2019b) but with a stronger backbone. The DiffPhysics baseline is a strict subset of $\nabla Sim$ : it involves only the differentiable physics engine, but it needs precise 3D states as supervision, which is the primary factor behind its superior performance. Nevertheless, $\nabla Sim$ is able to estimate mass from video very precisely, to an absolute relative error of $9.01\mathrm{e}{-5}$ , nearly two orders of magnitude better than the ConvLSTM baseline. Two other baselines are also used: the "Average" baseline always predicts the dataset mean and the "Random" baseline
|
| 111 |
+
|
| 112 |
+
<table><tr><td>Approach</td><td>Mean abs. err. (kg)</td><td>Abs. Rel. err.</td></tr><tr><td>Average</td><td>0.2022</td><td>0.1031</td></tr><tr><td>Random</td><td>0.2653</td><td>0.1344</td></tr><tr><td>ConvLSTM Xu et al. (2019b)</td><td>0.1347</td><td>0.0094</td></tr><tr><td>PyBullet + REINFORCE Ehsani et al. (2020)</td><td>0.0928</td><td>0.3668</td></tr><tr><td>DiffPhysics (3D sup.)</td><td>1.35e-9</td><td>5.17e-9</td></tr><tr><td>∇Sim</td><td>2.36e-5</td><td>9.01e-5</td></tr></table>
|
| 113 |
+
|
| 114 |
+
Table 1: Mass estimation: $\nabla Sim$ obtains precise mass estimates, comparing favourably even with approaches that require 3D supervision (diffphysics). We report the mean absolute error and absolute relative errors for all approaches evaluated.
|
| 115 |
+
|
| 116 |
+
<table><tr><td rowspan="2">Approach</td><td>mass</td><td colspan="2">elasticity</td><td colspan="2">friction</td></tr><tr><td>m</td><td>kd</td><td>ke</td><td>kf</td><td>μ</td></tr><tr><td>Average</td><td>1.7713</td><td>3.7145</td><td>2.3410</td><td>4.1157</td><td>0.4463</td></tr><tr><td>Random</td><td>10.0007</td><td>4.18</td><td>2.5454</td><td>5.0241</td><td>0.5558</td></tr><tr><td>ConvLSTM Xu et al. (2019b)</td><td>0.029</td><td>0.14</td><td>0.14</td><td>0.17</td><td>0.096</td></tr><tr><td>DiffPhysics (3D sup.)</td><td>1.70e-8</td><td>0.036</td><td>0.0020</td><td>0.0007</td><td>0.0107</td></tr><tr><td>∇Sim</td><td>2.87e-4</td><td>0.4</td><td>0.0026</td><td>0.0017</td><td>0.0073</td></tr></table>
|
| 117 |
+
|
| 118 |
+
Table 2: Rigid-body parameter estimation: $\nabla Sim$ estimates contact parameters (elasticity, friction) to a high degree of accuracy, despite estimating them from video. Diffphys. requires accurate 3D ground-truth at 30 FPS. We report absolute relative errors for each approach evaluated.
|
| 119 |
+
|
| 120 |
+
<table><tr><td rowspan="3"></td><td colspan="3">Deformable solid FEM</td><td>Thin-shell (cloth)</td></tr><tr><td>Per-particle mass</td><td colspan="2">Material properties</td><td>Per-particle velocity</td></tr><tr><td>m</td><td>μ</td><td>λ</td><td>v</td></tr><tr><td>Approach</td><td>Rel. MAE</td><td>Rel. MAE</td><td>Rel. MAE</td><td>Rel. MAE</td></tr><tr><td>DiffPhysics (3D Sup.)</td><td>0.032</td><td>0.0025</td><td>0.0024</td><td>0.127</td></tr><tr><td>∇Sim</td><td>0.048</td><td>0.0054</td><td>0.0056</td><td>0.026</td></tr></table>
|
| 121 |
+
|
| 122 |
+
Table 3: Parameter estimation of deformable objects: We estimate per-particle masses and material properties (for solid deformable objects) and per-particle velocities for cloth. In the case of cloth, there is a perceivable performance drop for diffphysics, as the center of mass of a cloth is often outside the body, which results in ambiguity.
|
| 123 |
+
|
| 124 |
+
predicts a random parameter value from the test distribution. All baselines and training details can be found in Sec. H of the appendix.
|
| 125 |
+
|
| 126 |
+
To investigate whether analytical differentiability is required, our PyBullet + REINFORCE baseline applies black-box gradient estimation (Williams, 1992) through a non-differentiable simulator (Coumans & Bai, 2016-2019), similar to Ehsani et al. (2020). We find this baseline to be particularly sensitive to several simulation parameters, which results in poorer performance. In Table 2, we jointly estimate the friction and elasticity parameters of our compliant contact model from video observations alone. The trend is similar to Table 1, and $\nabla Sim$ is able to precisely recover the parameters of the simulation. A few examples can be seen in Fig. 3.
|
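For reference, here is a minimal sketch of the score-function (REINFORCE) estimator that such a baseline relies on; `rollout_mse` is a hypothetical wrapper around a non-differentiable simulator, reduced to a toy objective so the sketch is self-contained:

```python
import torch

def rollout_mse(mass: float) -> float:
    # In the baseline this would run PyBullet with the sampled mass and
    # score the result against the target video; a toy quadratic stands in.
    return (mass - 4.0) ** 2

mu = torch.nn.Parameter(torch.tensor(6.0))         # search distribution mean
log_sigma = torch.nn.Parameter(torch.tensor(0.0))  # log standard deviation
optimizer = torch.optim.Adam([mu, log_sigma], lr=5e-2)

for step in range(300):
    dist = torch.distributions.Normal(mu, log_sigma.exp())
    samples = dist.sample((16,))    # black-box simulator evaluations
    losses = torch.tensor([rollout_mse(m.item()) for m in samples])
    # REINFORCE: grad E[loss] = E[(loss - baseline) * grad log p(sample)]
    surrogate = ((losses - losses.mean()) * dist.log_prob(samples)).mean()
    optimizer.zero_grad()
    surrogate.backward()
    optimizer.step()
```

The high variance of this estimator is one source of the sensitivity noted above.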
| 127 |
+
|
| 128 |
+
# 4.1.2 DEFORMABLE BODIES (DEFORMABLE)
|
| 129 |
+
|
| 130 |
+
We conduct a series of experiments to investigate the ability of $\nabla Sim$ to recover physical parameters of deformable solids and thin-shell solids (cloth). Our physical model is parameterized by the per-particle mass and the Lamé elasticity parameters, as described in Appendix C.1. Fig. 3 illustrates the recovery of the elasticity parameters of a beam hanging under gravity by matching the deformation given by an input video sequence. Our method accurately recovers the parameters of 100 instances of deformable objects (cloth, balls, beams), as reported in Table 3 and Fig. 3.
|
| 131 |
+
|
| 132 |
+
# 4.1.3 SMOOTHNESS OF THE LOSS LANDSCAPE IN $\nabla Sim$
|
| 133 |
+
|
| 134 |
+
Since $\nabla Sim$ is a complex combination of differentiable non-linear components, we analyze the loss landscape to verify the validity of gradients through the system. Fig. 4 illustrates the loss landscape when optimizing for the mass of a rigid body when all other physical properties are known.
|
| 135 |
+
|
| 136 |
+
We examine the image-space mean-squared error (MSE) of a unit-mass cube $(1\mathrm{kg})$ for a range of initializations ($0.1\mathrm{kg}$ to $5\mathrm{kg}$). Notably, the loss landscape of $\nabla Sim$ is well-behaved and conducive to momentum-based optimizers. Applying the MSE to the first and last frames of the predicted and true videos provides the best gradients. For a naive gradient estimator applied to a non-differentiable simulator (PyBullet + REINFORCE), however, multiple local minima exist, resulting in a very narrow region of convergence. This explains $\nabla Sim$'s superior performance in Tables 1, 2, and 3.
|
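The landscape itself can be traced with a simple parameter sweep; the sketch below reuses the hypothetical `rollout_video` and `target_video` placeholders from the earlier sketch:

```python
import torch

masses = torch.linspace(0.1, 5.0, steps=100)  # initializations to probe
landscape = []
with torch.no_grad():
    for m in masses:
        frames = rollout_video(m)
        # As noted above, the MSE is applied to the first and last frames.
        loss = torch.nn.functional.mse_loss(frames[[0, -1]],
                                            target_video[[0, -1]])
        landscape.append(loss.item())
# Plotting `landscape` against `masses` reproduces a slice of Fig. 4 (left).
```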
| 137 |
+
|
| 138 |
+
# 4.2 VISUOMOTOR CONTROL
|
| 139 |
+
|
| 140 |
+
To investigate whether the gradients computed by $\nabla Sim$ are meaningful for vision-based tasks, we conduct a range of visuomotor control experiments involving the actuation of deformable objects towards a visual target pose (a single image). In all cases, we evaluate against diffphysics, which uses a goal specification and a reward, both defined over the 3D state-space.
|
| 141 |
+
|
| 142 |
+

|
| 143 |
+
(a) Loss landscape (rigid)
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
(b) Loss landscape (deformable)
|
| 147 |
+
Figure 4: Loss landscapes when optimizing for physical attributes using $\nabla Sim$. (Left) When estimating the mass of a rigid body with known shape using $\nabla Sim$, despite images being formed by a highly nonlinear process (simulation), the loss landscape is remarkably smooth for a range of initialization errors. (Right) When optimizing for the elasticity parameters of a deformable FEM solid, with both Lamé parameters $\lambda$ and $\mu$ set to 1000, the MSE loss has a unique, dominant minimum. Note that, for a fair comparison, the ground-truth for our PyBullet+REINFORCE baseline was generated using the PyBullet engine.
|
| 148 |
+
|
| 149 |
+
# 4.2.1 DEFORMABLE SOLIDS (CONTROL-WALKER, CONTROL-FEM)
|
| 150 |
+
|
| 151 |
+
The first example (control-walker) involves a 2D walker model. Our goal is to train a neural network (NN) control policy to actuate the walker to reach a target pose on the right-hand side of an image. Our NN consists of one fully connected layer and a tanh activation. The network input is a set of 8 time-varying sinusoidal signals, and the output is a scalar activation value per-tetrahedron. $\nabla Sim$ is able to solve this environment within three iterations of gradient descent, by minimizing a pixelwise MSE between the last frame of the rendered video and the goal image as shown in Fig. 5 (lower left).
|
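A minimal PyTorch sketch of the described policy (one linear layer plus tanh, mapping 8 sinusoidal signals to one activation per tetrahedron); the specific frequencies and `num_tets` are illustrative assumptions:

```python
import math
import torch

class WalkerPolicy(torch.nn.Module):
    def __init__(self, num_tets: int, num_signals: int = 8):
        super().__init__()
        self.fc = torch.nn.Linear(num_signals, num_tets)

    def forward(self, t: float) -> torch.Tensor:
        # Time-varying sinusoidal input signals at a few fixed frequencies.
        freqs = torch.arange(1, 9, dtype=torch.float32)
        signals = torch.sin(2.0 * math.pi * freqs * t)
        # One scalar activation value per tetrahedral element.
        return torch.tanh(self.fc(signals))
```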
| 152 |
+
|
| 153 |
+
In our second test, we formulate a more challenging 3D control problem (control-fem) where the goal is to actuate a soft-body FEM object (a gear) consisting of 1152 tetrahedral elements to move to a target position as shown in Fig. 5 (center). We use the same NN architecture as in the 2D walker example, and use the Adam (Kingma & Ba, 2015) optimizer
|
| 154 |
+
|
| 155 |
+
to minimize a pixelwise MSE loss. We also train a privileged baseline (diffphysics) that uses strong supervision and minimizes the MSE between the target position and the precise 3D location of the center-of-mass (COM) of the FEM model at each time step (i.e. a dense reward). We test both diffphysics and $\nabla Sim$ against a naive baseline that generates random activations and plot convergence behaviors in Fig. 6a.
|
| 156 |
+
|
| 157 |
+
While diffphysics appears to be a strong performer on this task, it is important to note that it uses explicit 3D supervision at each timestep (i.e., at 30 FPS). In contrast, $\nabla Sim$ uses a single image as an implicit target, and yet manages to achieve the goal state, albeit requiring a larger number of iterations.
|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
Figure 5: Visuomotor Control: $\nabla Sim$ provides gradients suitable for diverse, complex visuomotor control tasks. For control-fem and control-walker experiments, we train a neural network to actuate a soft body towards a target image (GT). For control-cloth, we optimize the cloth's initial velocity to hit a target (GT) (specified as an image), under nonlinear lift/drag forces.
|
| 161 |
+
|
| 162 |
+
# 4.2.2 CLOTH (CONTROL-CLOTH)
|
| 163 |
+
|
| 164 |
+
We design an experiment to control a piece of cloth by optimizing the initial velocity such that it reaches a pre-specified target. In each episode, a random cloth is spawned, comprising between 64 and 2048 triangles, and a new start/goal combination is chosen.
|
| 165 |
+
|
| 166 |
+
In this challenging setup, we notice that state-based MPC (diffphysics) is often unable to accurately reach the target. We believe this is due to the underdetermined nature of the problem: for objects such as cloth, the COM by itself does not uniquely determine the configuration of the object. Visuomotor control, on the other hand, provides a better-posed problem. An illustration of the task is presented in Fig. 5 (column 3), and the convergence of the methods is shown in Fig. 6b.
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
(a) Results of various approaches on the control-fem environment (6 random seeds; each seed corresponds to a different goal configuration). While diffphysics performs well, it assumes strong 3D supervision. In contrast, $\nabla Sim$ is able to solve the task by using just a single image of the target configuration.
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
(b) Results on the control-cloth environment (5 random seeds; each seed controls the dimensions and initial/target poses of the cloth). diffphysics converges to a suboptimal solution due to ambiguity in specifying the pose of a cloth via its center-of-mass. $\nabla Sim$ solves the environment using a single target image.
|
| 173 |
+
Figure 6: Convergence Analysis: Performance of $\nabla Sim$ on visuomotor control using image-based supervision, 3D supervision, and random policies.
|
| 174 |
+
|
| 175 |
+
# 4.3 IMPACT OF IMPERFECT DYNAMICS AND RENDERING MODELS
|
| 176 |
+
|
| 177 |
+
Since $\nabla Sim$ is a white-box method, its performance relies on the choice of dynamics and rendering models employed. An immediate question that arises is: "how would the performance of $\nabla Sim$ be impacted (if at all) by such modeling choices?" We conduct multiple experiments targeted at investigating modeling errors and summarize them in Table 4 (left).
|
| 178 |
+
|
| 179 |
+
We choose a dataset comprising 90 objects equally representing rigid, deformable, and cloth types. By not modeling specific dynamics and rendering phenomena, we create the following 5 variants of our simulator.
|
| 180 |
+
|
| 181 |
+
1. Unmodeled friction: We model all collisions as being frictionless.
|
| 182 |
+
2. Unmodeled elasticity: We model all collisions as perfectly elastic.
|
| 183 |
+
3. Rigid-as-deformable: All rigid objects in the dataset are modeled as deformable objects.
|
| 184 |
+
4. Deformable-as-rigid: All deformable objects in the dataset are modeled as rigid objects.
|
| 185 |
+
5. Photorealistic render: We employ a photorealistic renderer—as opposed to $\nabla Sim$ ’s differentiable rasterizers—in generating the target images.
|
| 186 |
+
|
| 187 |
+
In all cases, we evaluate the accuracy with which the mass of the target object is estimated from a target video sequence devoid of modeling discrepancies. In general, we observe that imperfect dynamics models (i.e. unmodeled friction and elasticity, or modeling a rigid object as deformable or vice-versa) have a more profound impact on parameter identification compared to imperfect renderers.
|
| 188 |
+
|
| 189 |
+
# 4.3.1 UNMODELED DYNAMICS PHENOMENA
|
| 190 |
+
|
| 191 |
+
From Table 4 (left), we observe a noticeable performance drop when dynamics effects go unmodeled. As expected, the repercussions of incorrect object-type modeling (rigid-as-deformable, deformable-as-rigid) are more severe than those of unmodeled contact parameters (friction, elasticity). Modeling a deformable body as a rigid body makes the deformation parameters irrecoverable and has the most severe impact on the recovered parameter set.
|
| 192 |
+
|
| 193 |
+
# 4.3.2 UNMODELED RENDERING PHENOMENA
|
| 194 |
+
|
| 195 |
+
We also independently investigate the impact of unmodeled rendering effects (assuming perfect dynamics). We render ground-truth images and object foreground masks using a photorealistic renderer (Pharr et al., 2016), and perform physical parameter estimation from these videos. The performance obtained under this setting is superior to that obtained under dynamics-model imperfections.
|
| 196 |
+
|
| 197 |
+
<table><tr><td>Approach</td><td>Mean Rel. Abs. Err.</td></tr><tr><td>Unmodeled friction</td><td>0.1866</td></tr><tr><td>Unmodeled elasticity</td><td>0.2281</td></tr><tr><td>Rigid-as-deformable</td><td>0.3462</td></tr><tr><td>Deformable-as-rigid</td><td>0.4974</td></tr><tr><td>Photorealistic render</td><td>0.1793</td></tr><tr><td>Perfect model</td><td>0.1071</td></tr></table>

<table><tr><td>Tetrahedra (#)</td><td>Forward (DP)</td><td>Forward (DR)</td><td>Backward (DP)</td><td>Backward (DP + DR)</td></tr><tr><td>100</td><td>9057 Hz</td><td>3504 Hz</td><td>3721 Hz</td><td>3057 Hz</td></tr><tr><td>200</td><td>9057 Hz</td><td>3478 Hz</td><td>3780 Hz</td><td>2963 Hz</td></tr><tr><td>400</td><td>8751 Hz</td><td>3357 Hz</td><td>3750 Hz</td><td>1360 Hz</td></tr><tr><td>1000</td><td>4174 Hz</td><td>1690 Hz</td><td>1644 Hz</td><td>1041 Hz</td></tr><tr><td>2000</td><td>3967 Hz</td><td>1584 Hz</td><td>1655 Hz</td><td>698 Hz</td></tr><tr><td>5000</td><td>3871 Hz</td><td>1529 Hz</td><td>1553 Hz</td><td>424 Hz</td></tr><tr><td>10000</td><td>3721 Hz</td><td>1500 Hz</td><td>1429 Hz</td><td>248 Hz</td></tr></table>
|
| 198 |
+
|
| 199 |
+
Table 4: (Left) Impact of imperfect models: The accuracy of physical parameters estimated by $\nabla Sim$ is impacted by the choice of dynamics and graphics (rendering) models. We find that the system is more sensitive to the choice of dynamics models than to the rendering engine used. (Right) Timing analysis: We report runtime in simulation steps / second (Hz). $\nabla Sim$ is significantly faster than real-time, even for complex geometries.
|
| 200 |
+
|
| 201 |
+
# 4.3.3 IMPACT OF SHADING AND TEXTURE CUES
|
| 202 |
+
|
| 203 |
+
Although our work does not attempt to bridge the reality gap, we show early prototypes to assess phenomena such as shading and texture. Fig. 7 shows the accuracy over time for mass estimation from video. We evaluate three variants of the renderer: "Only color", "Shading", and "Texture". The "Only color" variant renders each mesh element in the same color regardless of the position and orientation of the light source. The "Shading" variant implements a Phong shading model and can model specular and diffuse reflections. The "Texture" variant additionally applies a non-uniform texture sampled from ShapeNet (Chang et al., 2015). We notice that shading and texture cues significantly improve convergence speed. This is expected, as vertex colors alone offer few appearance cues inside the object boundaries, leading to poor correspondences between the rendered and ground-truth images. Furthermore, textures seem to offer slight improvements in convergence speed over shaded models, as highlighted by the inset (log-scale) plot in Fig. 7.
|
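The "Shading" variant follows the standard Phong model; below is a minimal per-pixel sketch (all direction vectors assumed normalized, coefficients illustrative):

```python
import torch

def phong(normal, light_dir, view_dir, base_color,
          ambient=0.1, diffuse=0.7, specular=0.2, shininess=32.0):
    # Diffuse term: Lambertian cosine falloff.
    n_dot_l = torch.clamp((normal * light_dir).sum(-1, keepdim=True), min=0.0)
    # Specular term: reflect the light direction about the surface normal.
    reflected = 2.0 * n_dot_l * normal - light_dir
    r_dot_v = torch.clamp((reflected * view_dir).sum(-1, keepdim=True), min=0.0)
    return base_color * (ambient + diffuse * n_dot_l) \
        + specular * r_dot_v ** shininess
```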
| 204 |
+
|
| 205 |
+
# 4.3.4 TIMING ANALYSIS
|
| 206 |
+
|
| 207 |
+
Table 4 (right) shows simulation rates for the forward and backward passes of each module. We report forward and backward pass rates separately for the differentiable physics (DP) and the differentiable rendering (DR) modules. The time complexity of $\nabla Sim$ is a function of the number of tetrahedra and/or triangles. We illustrate the arguably more complex case of deformable object simulation for varying numbers of tetrahedra (ranging from 100 to 10000). Even in the case of 10000 tetrahedra (enough to construct complex mesh models of multiple moving objects), $\nabla Sim$ enables faster-than-real-time simulation (1500 steps/second).
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
Figure 7: Including shading and texture cues lead to faster convergence. Inset plot has a logarithmic Y-axis.
|
| 211 |
+
|
| 212 |
+
# 5 CONCLUSION
|
| 213 |
+
|
| 214 |
+
We presented $\nabla Sim$ , a versatile differentiable simulator that enables system identification from
|
| 215 |
+
|
| 216 |
+
videos by differentiating through physical processes governing dynamics and image formation. We demonstrated the benefits of such a holistic approach by estimating physical attributes for time-evolving scenes with complex dynamics and deformations, all from raw video observations. We also demonstrated the applicability of this efficient and accurate estimation scheme to end-to-end visuomotor control tasks. The latter case highlights $\nabla Sim$'s efficient integration with PyTorch, facilitating interoperability with existing machine learning modules. Interesting avenues for future work include extending our differentiable simulation to contact-rich motion, articulated bodies, and higher-fidelity physically-based renderers; doing so takes us closer to operating in the real world.
|
| 217 |
+
|
| 218 |
+
# ACKNOWLEDGEMENTS
|
| 219 |
+
|
| 220 |
+
KM and LP thank the IVADO fundamental research project grant for funding. FG thanks CIFAR for project funding under the Catalyst program. FS and LP acknowledge partial support from NSERC.
|
| 221 |
+
|
| 222 |
+
# REFERENCES
|
| 223 |
+
|
| 224 |
+
Martín Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dan Mané, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Talwar, Paul Tucker, Vincent Vanhoucke, Vijay Vasudevan, Fernanda Viégas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xiaoqiang Zheng. TensorFlow: Large-scale machine learning on heterogeneous systems, 2015. URL http://tensorflow.org/. Software available from tensorflow.org. 4, 17
|
| 225 |
+
Pulkit Agrawal, Ashvin Nair, Pieter Abbeel, Jitendra Malik, and Sergey Levine. Learning to poke by poking: Experiential learning of intuitive physics. Neural Information Processing Systems, 2016. 3
|
| 226 |
+
Hassan Abu Alhaija, Siva Karthik Mustikovela, Andreas Geiger, and Carsten Rother. Geometric image synthesis. In Proceedings of Computer Vision and Pattern Recognition, 2018. 3
|
| 227 |
+
Martin Asenov, Michael Burke, Daniel Angelov, Todor Davchev, Kartic Subr, and Subramanian Ramamoorthy. Vid2Param: Modelling of dynamics parameters from video. IEEE Robotics and Automation Letters, 2019. 3
|
| 228 |
+
Peter W. Battaglia, Jessica B. Hamrick, and Joshua B. Tenenbaum. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110(45):18327-18332, 2013. ISSN 0027-8424. doi: 10.1073/pnas.1306572110. 3
|
| 229 |
+
Kiran S Bhat, Steven M Seitz, Jovan Popovic, and Pradeep K Khosla. Computing the physical parameters of rigid-body motion from video. In Proceedings of the European Conference on Computer Vision, 2002. 3
|
| 230 |
+
Kiran S Bhat, Christopher D Twigg, Jessica K Hodgins, Pradeep Khosla, Zoran Popovic, and Steven M Seitz. Estimating cloth simulation parameters from video. In ACM SIGGRAPH/Eurographics Symposium on Computer Animation, 2003. 3
|
| 231 |
+
James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, and Skye Wanderman-Milne. JAX: composable transformations of Python+NumPy programs, 2018. URL http://github.com/google/jax. 4, 17
|
| 232 |
+
Robert Bridson, Sebastian Marino, and Ronald Fedkiw. Simulation of clothing with folds and wrinkles. In ACM SIGGRAPH 2005 Courses, 2005. 17
|
| 233 |
+
Marcus Brubaker, David Fleet, and Aaron Hertzmann. Physics-based person tracking using the anthropomorphic walker. International Journal of Computer Vision, 87:140-155, 03 2010. 3
|
| 234 |
+
Marcus A Brubaker, Leonid Sigal, and David J Fleet. Estimating contact dynamics. In Proceedings of International Conference on Computer Vision, 2009. 3
|
| 235 |
+
Arunkumar Byravan and Dieter Fox. SE3-Nets: Learning rigid body motion using deep neural networks. IEEE International Conference on Robotics and Automation (ICRA), 2017. 3
|
| 236 |
+
Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 5, 9
|
| 237 |
+
Michael B. Chang, Tomer Ullman, Antonio Torralba, and Joshua B. Tenenbaum. A compositional object-based approach to learning physical dynamics. International Conference on Learning Representations, 2016. 1, 3
|
| 238 |
+
Tian Qi Chen, Yulia Rubanova, Jesse Bettencourt, and David K Duvenaud. Neural ordinary differential equations. In Neural Information Processing Systems, 2018. 4, 17
|
| 239 |
+
Wenzheng Chen, Jun Gao, Huan Ling, Edward Smith, Jaakko Lehtinen, Alec Jacobson, and Sanja Fidler. Learning to predict 3d objects with an interpolation-based differentiable renderer. Neural Information Processing Systems, 2019. 3, 4, 19
|
| 240 |
+
|
| 241 |
+
Xi Chen, Yan Duan, Rein Houthooft, John Schulman, Ilya Sutskever, and Pieter Abbeel. InfoGAN: Interpretable representation learning by information maximizing generative adversarial nets. Neural Information Processing Systems, 2016. 1, 3
|
| 242 |
+
Zhengdao Chen, Jianyu Zhang, Martin Arjovsky, and Léon Bottou. Symplectic recurrent neural networks. In International Conference on Learning Representations, 2020. 3
|
| 243 |
+
Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. Proceedings of Computer Vision and Pattern Recognition, 2019. 3
|
| 244 |
+
Erwin Coumans and Yunfei Bai. PyBullet, a python module for physics simulation for games, robotics and machine learning. http://pybullet.org, 2016-2019. 6, 22
|
| 245 |
+
Kyle Cranmer, Johann Brehmer, and Gilles Louppe. The frontier of simulation-based inference. In National Academy of Sciences (NAS), 2020a. 2
|
| 246 |
+
Miles Cranmer, Sam Greydanus, Stephan Hoyer, Peter Battaglia, David Spergel, and Shirley Ho. Lagrangian neural networks. In ICLR Workshops, 2020b. 1, 3, 20
|
| 247 |
+
Filipe de Avila Belbute-Peres, Kevin Smith, Kelsey Allen, Josh Tenenbaum, and J. Zico Kolter. End-to-end differentiable physics for learning and control. In Neural Information Processing Systems, 2018. 2, 4, 19, 20, 25
|
| 248 |
+
Jonas Degrave, Michiel Hermans, Joni Dambre, and Francis Wyffels. A differentiable physics engine for deep learning in robotics. Neural Information Processing Systems, 2016. 1, 3, 4, 20
|
| 249 |
+
Sebastien Ehrhardt, Aron Monszpart, Niloy J. Mitra, and Andrea Vedaldi. Learning a physical long-term predictor. arXiv, 2017. 3
|
| 250 |
+
Sebastien Ehrhardt, Aron Monszpart, Niloy J. Mitra, and Andrea Vedaldi. Unsupervised intuitive physics from visual observations. Asian Conference on Computer Vision, 2018. 3
|
| 251 |
+
Kiana Ehsani, Shubham Tulsiani, Saurabh Gupta, Ali Farhadi, and Abhinav Gupta. Use the Force, Luke! learning to predict physical forces by simulating effects. In Proceedings of Computer Vision and Pattern Recognition, 2020. 3, 5, 6, 23
|
| 252 |
+
Tom Erez, Yuval Tassa, and Emanuel Todorov. Simulation tools for model-based robotics: Comparison of Bullet, Havok, MuJoCo, ODE, and PhysX. In IEEE International Conference on Robotics and Automation (ICRA), 2015. 17, 18, 19
|
| 253 |
+
S. M. Ali Eslami, Danilo Jimenez Rezende, Frederic Besse, Fabio Viola, Ari S. Morcos, Marta Garnelo, Avraham Ruderman, Andrei A. Rusu, Ivo Danihelka, Karol Gregor, David P. Reichert, Lars Buesing, Theophane Weber, Oriol Vinyals, Dan Rosenbaum, Neil Rabinowitz, Helen King, Chloe Hillier, Matt Botvinick, Daan Wierstra, Koray Kavukcuoglu, and Demis Hassabis. Neural scene representation and rendering. Science, 2018. 3
|
| 254 |
+
Katerina Fragkiadaki, Pulkit Agrawal, Sergey Levine, and Jitendra Malik. Learning visual predictive models of physics for playing billiards. In International Conference on Learning Representations, 2016. 3
|
| 255 |
+
Sam Greydanus, Misko Dzamba, and Jason Yosinski. Hamiltonian neural networks. In Neural Information Processing Systems, 2019. 1, 3, 20
|
| 256 |
+
Andreas Griewank and Andrea Walther. Introduction to automatic differentiation. PAMM, 2(1): 45-49, 2003. 2
|
| 257 |
+
Thibault Groueix, Matthew Fisher, Vladimir G. Kim, Bryan C. Russell, and Mathieu Aubry. Atlasnet: A papier-mâché approach to learning 3d surface generation. In Proceedings of Computer Vision and Pattern Recognition, 2018. 3
|
| 258 |
+
Radek Grzeszczuk, Demetri Terzopoulos, and Geoffrey Hinton. Neuroanimator: Fast neural network emulation and control of physics-based models. In Proceedings of the 25th annual conference on Computer graphics and interactive techniques, 1998. 3
|
| 259 |
+
|
| 260 |
+
Vincent Le Guen and Nicolas Thome. Disentangling physical dynamics from unknown factors for unsupervised video prediction. In Proceedings of Computer Vision and Pattern Recognition, 2020. 3
|
| 261 |
+
Abhinav Gupta, Alexei A. Efros, and Martial Hebert. Blocks world revisited: Image understanding using qualitative geometry and mechanics. In Proceedings of the European Conference on Computer Vision, 2010. 3
|
| 262 |
+
Ernst Hairer, Christian Lubich, and Gerhard Wanner. Geometric numerical integration: structure-preserving algorithms for ordinary differential equations, volume 31. Springer Science & Business Media, 2006. 18
|
| 263 |
+
Eric Heiden, David Millard, Hejia Zhang, and Gaurav S. Sukhatme. Interactive differentiable simulation. In arXiv, 2019. 3
|
| 264 |
+
Philipp Henzler, Niloy J. Mitra, and Tobias Ritschel. Escaping plato's cave using adversarial training: 3d shape from unstructured 2d image collections. In Proceedings of International Conference on Computer Vision, 2019. 3
|
| 265 |
+
Yuanming Hu, Yu Fang, Ziheng Ge, Ziyin Qu, Yixin Zhu, Andre Pradhana, and Chenfanfu Jiang. A moving least squares material point method with displacement discontinuity and two-way rigid body coupling. ACM Transactions on Graphics, 37(4), 2018. 3
|
| 266 |
+
Yuanming Hu, Jiancheng Liu, Andrew Spielberg, Joshua B. Tenenbaum, William T. Freeman, Jiajun Wu, Daniela Rus, and Wojciech Matusik. Chainqueen: A real-time differentiable physical simulator for soft robotics. In IEEE International Conference on Robotics and Automation (ICRA), 2019. 3, 17
|
| 267 |
+
Yuanming Hu, Luke Anderson, Tzu-Mao Li, Qi Sun, Nathan Carr, Jonathan Ragan-Kelley, and Frédo Durand. DiffTaichi: Differentiable programming for physical simulation. International Conference on Learning Representations, 2020. 1, 3, 4, 17, 19, 20
|
| 268 |
+
Carlo Innamorati, Bryan Russell, Danny Kaufman, and Niloy Mitra. Neural re-simulation for generating bounces in single images. In Proceedings of International Conference on Computer Vision, 2019. 3
|
| 269 |
+
Michael Janner, Sergey Levine, William T. Freeman, Joshua B. Tenenbaum, Chelsea Finn, and Jiajun Wu. Reasoning about physical interactions with object-oriented prediction and planning. International Conference on Learning Representations, 2019. 1, 3
|
| 270 |
+
Miguel Jaques, Michael Burke, and Timothy M. Hospedales. Physics-as-inverse-graphics: Joint unsupervised learning of objects and physics from video. International Conference on Learning Representations, 2020. 3
|
| 271 |
+
Krishna Murthy Jatavallabhula, Edward Smith, Jean-Francois Lafleche, Clement Fuji Tsang, Artem Rozantsev, Wenzheng Chen, Tommy Xiang, Rev Lebaredian, and Sanja Fidler. Kaolin: A pytorch library for accelerating 3d deep learning research. In arXiv, 2019. 3
|
| 272 |
+
Hiroharu Kato, Yoshitaka Ushiku, and Tatsuya Harada. Neural 3d mesh renderer. In Proceedings of Computer Vision and Pattern Recognition, 2018. 3, 4
|
| 273 |
+
Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2015. 7, 24
|
| 274 |
+
David Kirk et al. NVIDIA CUDA software and GPU parallel computing architecture. In ISMM, volume 7, pp. 103-104, 2007. 17
|
| 275 |
+
Krzysztof Kozlowski. Modelling and Identification in Robotics. Advances in Industrial Control. Springer, London, 1998. ISBN 978-1-4471-1139-9. 3
|
| 276 |
+
T. D. Kulkarni, P. Kohli, J. B. Tenenbaum, and V. Mansinghka. Picture: A probabilistic programming language for scene perception. In Proceedings of Computer Vision and Pattern Recognition, 2015.
|
| 277 |
+
|
| 278 |
+
Tzu-Mao Li, Miika Aittala, Fredo Durand, and Jaakko Lehtinen. Differentiable monte carlo ray tracing through edge sampling. SIGGRAPH Asia, 37(6):222:1-222:11, 2018. 3
|
| 279 |
+
Yunzhu Li, Jiajun Wu, Russ Tedrake, Joshua B Tenenbaum, and Antonio Torralba. Learning particle dynamics for manipulating rigid bodies, deformable objects, and fluids. In International Conference on Learning Representations, 2019. 3
|
| 280 |
+
Yunzhu Li, Toru Lin, Kexin Yi, Daniel Bear, Daniel L. K. Yamins, Jiajun Wu, Joshua B. Tenenbaum, and Antonio Torralba. Visual grounding of learned physical models. In International Conference on Machine Learning, 2020. 3
|
| 281 |
+
Junbang Liang, Ming Lin, and Vladlen Koltun. Differentiable cloth simulation for inverse problems. In Neural Information Processing Systems, 2019. 1, 3
|
| 282 |
+
Yiyi Liao, Katja Schwarz, Lars Mescheder, and Andreas Geiger. Towards unsupervised learning of generative models for 3d controllable image synthesis. In Proceedings of Computer Vision and Pattern Recognition, 2019. 3
|
| 283 |
+
C Karen Liu, Aaron Hertzmann, and Zoran Popovic. Learning physics-based motion style with nonlinear inverse optimization. ACM Transactions on Graphics (TOG), 24(3):1071-1081, 2005. 3
|
| 284 |
+
Shichen Liu, Tianye Li, Weikai Chen, and Hao Li. Soft rasterizer: A differentiable renderer for image-based 3d reasoning. Proceedings of International Conference on Computer Vision, 2019. 3, 4, 19
|
| 285 |
+
Matthew M. Loper and Michael J. Black. Opendr: An approximate differentiable renderer. In Proceedings of the European Conference on Computer Vision, 2014. 3, 4
|
| 286 |
+
Miles Macklin, Matthias Müller, Nuttapong Chentanez, and Tae-Yong Kim. Unified particle physics for real-time applications. ACM Transactions on Graphics (TOG), 33(4):1-12, 2014. 2
|
| 287 |
+
Dougal Maclaurin, David Duvenaud, Matt Johnson, and Jamie Townsend. Autograd, 2015. URL https://github.com/HIPS/autograd. 20
|
| 288 |
+
Jeffrey Mahler, Jacky Liang, Sherdil Niyaz, Michael Laskey, Richard Doan, Xinyu Liu, Juan Aparicio Ojea, and Ken Goldberg. Dex-net 2.0: Deep learning to plan robust grasps with synthetic point clouds and analytic grasp metrics. In Robotics Science and Systems, 2017. 5
|
| 289 |
+
Richard Mann, Allan Jepson, and Jeffrey Mark Siskind. The computational perception of scene dynamics. Computer Vision and Image Understanding, 65(2):113-128, 1997. 3
|
| 290 |
+
Charles C Margossian. A review of automatic differentiation and its efficient implementation. Wiley Interdisciplinary Reviews: Data Mining and Knowledge Discovery, 9(4):e1305, 2019. 20
|
| 291 |
+
Viraj Mehta, Ian Char, Willie Neiswanger, Youngseog Chung, Andrew Oakleigh Nelson, Mark D Boyer, Egemen Kolemen, and Jeff Schneider. Neural dynamical systems: Balancing structure and flexibility in physical prediction. ICLR Workshops, 2020. 1
|
| 292 |
+
Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings of Computer Vision and Pattern Recognition, 2019. 3
|
| 293 |
+
Mateusz Michalkiewicz, Jhony K. Pontes, Dominic Jack, Mahsa Baktashmotlagh, and Anders Eriksson. Implicit surface representations as layers in neural networks. In Proceedings of International Conference on Computer Vision, 2019. 3
|
| 294 |
+
Ben Mildenhall, Pratul P. Srinivasan, Rodrigo Ortiz-Cayon, Nima Khademi Kalantari, Ravi Ramamoorthi, Ren Ng, and Abhishek Kar. Local light field fusion: Practical view synthesis with prescriptive sampling guidelines. ACM Transactions on Graphics (TOG), 2019. 3
|
| 295 |
+
Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In Proceedings of the European Conference on Computer Vision, 2020. 3
|
| 296 |
+
|
| 297 |
+
Roozbeh Mottaghi, Hessam Bagherinezhad, Mohammad Rastegari, and Ali Farhadi. Newtonian image understanding: Unfolding the dynamics of objects in static images. Proceedings of Computer Vision and Pattern Recognition, 2015. 3
|
| 298 |
+
Roozbeh Mottaghi, Mohammad Rastegari, Abhinav Gupta, and Ali Farhadi. "what happens if..." learning to predict the effect of forces in images. In Proceedings of the European Conference on Computer Vision, 2016. 3
|
| 299 |
+
D.J. Murray-Smith. The inverse simulation approach: a focused review of methods and applications. Mathematics and Computers in Simulation, 53(4):239 - 247, 2000. ISSN 0378-4754. 3
|
| 300 |
+
Thu Nguyen-Phuoc, Chuan Li, Stephen Balaban, and Yong-Liang Yang. Rendernet: A deep convolutional network for differentiable rendering from 3d shapes. Neural Information Processing Systems, 2018. 3
|
| 301 |
+
Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proceedings of Computer Vision and Pattern Recognition, 2020. 3
|
| 302 |
+
Merlin Nimier-David, Delio Vicini, Tizian Zeltner, and Wenzel Jakob. Mitsuba 2: A retargetable forward and inverse renderer. Transactions on Graphics (Proceedings of SIGGRAPH Asia), 38(6), 2019. 3
|
| 303 |
+
Jeong Joon Park, Peter Florence, Julian Straub, Richard A. Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In Proceedings of Computer Vision and Pattern Recognition, 2019. 3
|
| 304 |
+
Despoina Paschalidou, Ali Osman Ulusoy, Carolin Schmitt, Luc van Gool, and Andreas Geiger. Raynet: Learning volumetric 3d reconstruction with ray potentials. In Proceedings of Computer Vision and Pattern Recognition, 2019. 3
|
| 305 |
+
Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In Neural Information Processing Systems, 2019. 4, 17
|
| 306 |
+
Matt Pharr, Wenzel Jakob, and Greg Humphreys. Physically Based Rendering: From Theory to Implementation. Morgan Kaufmann Publishers Inc., 2016. ISBN 0128006455. 2, 8
|
| 307 |
+
Yi-Ling Qiao, Junbang Liang, Vladlen Koltun, and Ming C Lin. Scalable differentiable physics for learning and control. International Conference on Machine Learning, 2020. 1, 4
|
| 308 |
+
Fabio Ramos, Rafael Carvalhaes Possas, and Dieter Fox. Bayessim: adaptive domain randomization via probabilistic inference for robotics simulators. Robotics Science and Systems, 2019. 2
|
| 309 |
+
Nikhila Ravi, Jeremy Reizenstein, David Novotny, Taylor Gordon, Wan-Yen Lo, Justin Johnson, and Georgia Gkioxari. Accelerating 3d deep learning with pytorch3d. arXiv preprint arXiv:2007.08501, 2020. 3
|
| 310 |
+
Google Research. Tiny Differentiable Simulator, 2020 (accessed May 15, 2020). URL https://github.com/google-research/tiny-differentiable-simulator. 3, 4
|
| 311 |
+
Danilo Jimenez Rezende, SM Ali Eslami, Shakir Mohamed, Peter Battaglia, Max Jaderberg, and Nicolas Heess. Unsupervised learning of 3d structure from images. In Neural Information Processing Systems, 2016. 23
|
| 312 |
+
Mathieu Salzmann and Raquel Urtasun. Physically-based motion models for 3d tracking: A convex formulation. In Proceedings of International Conference on Computer Vision, 2011. 3
|
| 313 |
+
Alvaro Sanchez-Gonzalez, Victor Bapst, Kyle Cranmer, and Peter Battaglia. Hamiltonian graph networks with ode integrators. In arXiv, 2019. 3, 20
|
| 314 |
+
|
| 315 |
+
Connor Schenck and Dieter Fox. Spnets: Differentiable fluid dynamics for deep neural networks. In International Conference on Robot Learning, 2018. 3
|
| 316 |
+
Eftychios Sifakis and Jernej Barbic. Fem simulation of 3d deformable solids: a practitioner's guide to theory, discretization and model reduction. In ACM SIGGRAPH 2012 courses, 2012. 17
|
| 317 |
+
Breannan Smith, Fernando De Goes, and Theodore Kim. Stable neo-hookean flesh simulation. ACM Transactions on Graphics, 37(2):1-15, 2018a. 17, 19
|
| 318 |
+
Edward Smith, Scott Fujimoto, and David Meger. Multi-view silhouette and depth decomposition for high resolution 3d object representation. In Neural Information Processing Systems, 2018b. 3
|
| 319 |
+
Edward J. Smith, Scott Fujimoto, Adriana Romero, and David Meger. Geometrics: Exploiting geometric structure for graph-encoded objects. International Conference on Machine Learning, 2020. 3
|
| 320 |
+
Changkyu Song and Abdeslam Boularias. Identifying mechanical models through differentiable simulations. In Learning for Dynamical Systems and Control (L4DC), 2020a. 3, 4
|
| 321 |
+
Changkyu Song and Abdeslam Boularias. Learning to slide unknown objects with differentiable physics simulations. In Robotics Science and Systems, 2020b. 3, 4
|
| 322 |
+
Jos Stam. Stable fluids. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, pp. 121-128, 1999. 18, 20
|
| 323 |
+
Trevor Standley, Ozan Sener, Dawn Chen, and Silvio Savarese. image2mass: Estimating the mass of an object from its image. In International Conference on Robot Learning, 2017. 3
|
| 324 |
+
Giovanni Sutanto, Austin S. Wang, Yixin Lin, Mustafa Mukadam, Gaurav S. Sukhatme, Akshara Rai, and Franziska Meier. Encoding physical constraints in differentiable newton-euler algorithm. In Learning for Dynamical systems and Control (L4DC), 2020. 3
|
| 325 |
+
Mingxing Tan and Quoc Le. EfficientNet: Rethinking model scaling for convolutional neural networks. In International Conference on Machine Learning, 2019. 5, 24
|
| 326 |
+
Emanuel Todorov. Convex and analytically-invertible dynamics with contacts and constraints: Theory and implementation in mujoco. In IEEE International Conference on Robotics and Automation (ICRA), 2014. 18
|
| 327 |
+
Peter Toth, Danilo Jimenez Rezende, Andrew Jaegle, Sébastien Racanière, Aleksandar Botev, and Irina Higgins. Hamiltonian generative networks. In International Conference on Learning Representations, 2020. 3, 20
|
| 328 |
+
Marc Toussaint, Kelsey Allen, Kevin Smith, and Joshua Tenenbaum. Differentiable physics and stable modes for tool-use and manipulation planning. In Robotics Science and Systems, 2018. 2, 4
|
| 329 |
+
Bart van Merrienboer, Alexander B Wiltschko, and Dan Moldovan. Tangent: automatic differentiation using source code transformation in python. In Neural Information Processing Systems, 2018. 4, 17
|
| 330 |
+
Bin Wang, Paul G. Kry, Yuanmin Deng, Uri M. Ascher, Hui Huang, and Baoquan Chen. Neural material: Learning elastic constitutive material and damping models from sparse data. arXiv, 2018a. 3
|
| 331 |
+
Kun Wang, Mridul Aanjaneya, and Kostas Bekris. A first principles approach for data-efficient system identification of spring-rod systems via differentiable physics engines. In arXiv, 2020. 3
|
| 332 |
+
Nanyang Wang, Yinda Zhang, Zhuwen Li, Yanwei Fu, Wei Liu, and Yu-Gang Jiang. Pixel2mesh: Generating 3d mesh models from single rgb images. In Proceedings of the European Conference on Computer Vision, 2018b. 3
|
| 333 |
+
Nicholas Watters, Daniel Zoran, Theophane Weber, Peter Battaglia, Razvan Pascanu, and Andrea Tacchetti. Visual interaction networks: Learning a physics simulator from video. In Neural Information Processing Systems, 2017. 1, 3
|
| 334 |
+
|
| 335 |
+
P. M. Wensing, S. Kim, and J. E. Slotine. Linear matrix inequalities for physically consistent inertial parameter identification: A statistical perspective on the mass distribution. IEEE Robotics and Automation Letters, 3(1):60-67, 2018. 3
|
| 336 |
+
Ronald J. Williams. Simple statistical gradient-following algorithms for connectionist reinforcement learning. Machine Learning, 8(3-4):229-256, 1992. ISSN 0885-6125. 2, 6, 22
|
| 337 |
+
Jiajun Wu, Ilker Yildirim, Joseph J Lim, William T Freeman, and Joshua B Tenenbaum. Galileo: Perceiving physical object properties by integrating a physics engine with deep learning. In Neural Information Processing Systems, 2015. 5, 23
|
| 338 |
+
Jiajun Wu, Joseph J Lim, Hongyi Zhang, Joshua B Tenenbaum, and William T Freeman. Physics 101: Learning physical object properties from unlabeled videos. In British Machine Vision Conference, 2016. 3
|
| 339 |
+
Jiajun Wu, Erika Lu, Pushmeet Kohli, William T Freeman, and Joshua B Tenenbaum. Learning to see physics via visual de-animation. In Neural Information Processing Systems, 2017a. 3, 4
|
| 340 |
+
Jiajun Wu, Joshua B Tenenbaum, and Pushmeet Kohli. Neural scene de-rendering. In Proceedings of Computer Vision and Pattern Recognition, 2017b. 2
|
| 341 |
+
Qiangeng Xu, Weiyue Wang, Duygu Ceylan, Radomir Mech, and Ulrich Neumann. Disn: Deep implicit surface network for high-quality single-view 3d reconstruction. In Neural Information Processing Systems, 2019a. 3
|
| 342 |
+
Zhenjia Xu, Jiajun Wu, Andy Zeng, Joshua B. Tenenbaum, and Shuran Song. Densephysnet: Learning dense physical object representations via multi-step dynamic interactions. In Robotics Science and Systems, 2019b. 1, 3, 5, 6
|
| 343 |
+
Ilker Yildirim, Tejas Kulkarni, Winrich Freiwald, and Joshua Tenenbaum. Efficient analysis-by-synthesis in vision: A computational framework, behavioral tests, and comparison with neural representations. In CogSci, 2015. 2
|
| 344 |
+
Ilker Yildirim, Michael Janner, Mario Belledonne, Christian Wallraven, W. A. Freiwald, and Joshua B. Tenenbaum. Causal and compositional generative models in online perception. In CogSci, 2017. 2
|
| 345 |
+
Ilker Yildirim, Mario Belledonne, Winrich Freiwald, and Josh Tenenbaum. Efficient inverse graphics in biological face processing. Science Advances, 6(10), 2020. doi: 10.1126/sciadv.aax5979. 2
|
| 346 |
+
L. Yu, N. Duncan, and S. Yeung. Fill and transfer: A simple physics-based approach for containability reasoning. In Proceedings of International Conference on Computer Vision, 2015. 3
|
| 347 |
+
Yaofeng Desmond Zhong, Biswadip Dey, and Amit Chakraborty. Symplectic ode-net: Learning hamiltonian dynamics with control. In International Conference on Learning Representations, 2020. 3
|
| 348 |
+
Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3D: A modern library for 3D data processing. arXiv:1801.09847, 2018. 3
|
| 349 |
+
|
| 350 |
+
# APPENDIX
|
| 351 |
+
|
| 352 |
+
# A DIFFERENTIABLE PHYSICS ENGINE
|
| 353 |
+
|
| 354 |
+
Under Lagrangian mechanics, the state of a physical system can be described in terms of generalized coordinates $\mathbf{q}$, generalized velocities $\dot{\mathbf{q}} = \mathbf{u}$, and design (or model) parameters $\theta$. For the purposes of exposition, we make no distinction between rigid bodies, deformable solids, or thin-shell models of cloth and other bodies. Although the specific choices of coordinates and parameters vary, the simulation procedure is virtually unchanged. We denote the combined state vector by $\mathbf{s}(t) = [\mathbf{q}(t), \mathbf{u}(t)]$.
|
| 355 |
+
|
| 356 |
+
The dynamic evolution of the system is governed by a second-order ordinary differential equation (ODE) of the form $\mathbf{M}\ddot{\mathbf{s}} = \mathbf{f}(\mathbf{s})$, where $\mathbf{M}$ is a mass matrix that may also depend on the state and design parameters $\theta$. Solutions to ODEs of this type may be obtained through black-box numerical integration methods, and their derivatives calculated through the continuous adjoint method Chen et al. (2018). However, we instead consider our physics engine as a differentiable operation that provides an implicit relationship between a state vector $\mathbf{s}^{-} = \mathbf{s}(t)$ at the start of a time step, and the updated state at the end of the time step $\mathbf{s}^{+} = \mathbf{s}(t + \Delta t)$. An arbitrary discrete time integration scheme can then be abstracted as the function $\mathbf{g}(\mathbf{s}^{-},\mathbf{s}^{+},\theta) = \mathbf{0}$, relating the initial and final system states and the model parameters $\theta$. By the implicit function theorem, if we can specify a loss function $l$ at the output of the simulator, we can compute $\frac{\partial l}{\partial\mathbf{s}^{-}}$ as $\mathbf{c}^T\frac{\partial\mathbf{g}}{\partial\mathbf{s}^{-}}$, where $\mathbf{c}$ is the solution to the linear system $\frac{\partial\mathbf{g}}{\partial\mathbf{s}^{+}}^T\mathbf{c} = -\frac{\partial l}{\partial\mathbf{s}^{+}}^T$, and likewise for the model parameters $\theta$.
|
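For a single step, this adjoint computation can be written down directly with automatic differentiation; a sketch, where `g_fn` and the dense state vectors are placeholders:

```python
import torch

# dl/ds- for one implicit step g(s-, s+) = 0 (theta omitted for brevity);
# g_fn and the state vectors are placeholders for the simulator's step relation.
def step_input_gradient(g_fn, s_prev, s_next, dl_ds_next):
    # Jacobians of the step relation with respect to s+ and s-.
    dg_ds_next = torch.autograd.functional.jacobian(
        lambda s: g_fn(s_prev, s), s_next)
    dg_ds_prev = torch.autograd.functional.jacobian(
        lambda s: g_fn(s, s_next), s_prev)
    # Solve (dg/ds+)^T c = -(dl/ds+)^T for the adjoint variables c.
    c = torch.linalg.solve(dg_ds_next.T, -dl_ds_next)
    # dl/ds- = c^T dg/ds-.
    return c @ dg_ds_prev
```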
| 357 |
+
|
| 358 |
+
While the partial derivatives $\frac{\partial\mathbf{g}}{\partial\mathbf{s}^{-}},\frac{\partial\mathbf{g}}{\partial\mathbf{s}^{+}},\frac{\partial\mathbf{g}}{\partial\theta}$ can be computed by graph-based automatic differentiation frameworks Paszke et al. (2019); Abadi et al. (2015); Bradbury et al. (2018), program transformation approaches such as DiffTaichi Hu et al. (2020) and Tangent van Merrienboer et al. (2018) are particularly well-suited to simulation code. We use an embedded subset of Python syntax, which computes the adjoint of each simulation kernel at runtime and generates C++/CUDA Kirk et al. (2007) code. Kernels are wrapped as custom autograd operations on PyTorch tensors, which allows users to focus on the definition of physical models and leverage PyTorch's tape-based autodiff to track the overall program flow. While this formulation is general enough to represent explicit, multi-step, or fully implicit time-integration schemes, we employ semi-implicit Euler integration, which is the preferred integration scheme for most simulators Erez et al. (2015).
|
| 359 |
+
|
| 360 |
+
# A.1 PHYSICAL MODELS
|
| 361 |
+
|
| 362 |
+
We now discuss some of the physical models available in $\nabla Sim$ .
|
| 363 |
+
|
| 364 |
+
Deformable Solids: In contrast with existing simulators that use grid-based methods for differentiable soft-body simulation Hu et al. (2019; 2020), we adopt a finite element (FEM) model with constant strain tetrahedral elements common in computer graphics Sifakis & Barbic (2012). We use the stable Neo-Hookean constitutive model of Smith et al. (2018a), which derives per-element forces from the following strain energy density:
|
| 365 |
+
|
| 366 |
+
$$
|
| 367 |
+
\Psi(\mathbf{q}, \theta) = \frac{\mu}{2}\left(I_C - 3\right) + \frac{\lambda}{2}(J - \alpha)^2 - \frac{\mu}{2}\log\left(I_C + 1\right), \tag{1}
|
| 368 |
+
$$
|
| 369 |
+
|
| 370 |
+
where $I_C, J$ are invariants of strain, $\theta = [\mu, \lambda]$ are the Lamé parameters, and $\alpha$ is a per-element actuation value that allows the element to expand and contract.
|
| 371 |
+
|
| 372 |
+
Numerically integrating the energy density over each tetrahedral mesh element with volume $V_{e}$ gives the total elastic potential energy, $U(\mathbf{q},\theta) = \sum V_{e}\Psi_{e}$. The forces due to this potential, $\mathbf{f}_e(\mathbf{s},\theta) = -\nabla_{\mathbf{q}}U(\mathbf{q},\theta)$, can be computed analytically, and their gradients obtained using the adjoint method (cf. Section 3.1).
|
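As a concrete reading of Eq. (1), the energy density of one element can be evaluated from its deformation gradient $F$, with $I_C = \mathrm{tr}(F^T F)$ and $J = \det F$; a minimal sketch:

```python
import torch

def neo_hookean_energy(F, mu, lam, alpha=1.0):
    # Strain invariants: I_C = tr(F^T F) and J = det(F).
    I_C = (F * F).sum()
    J = torch.det(F)
    return (mu / 2.0) * (I_C - 3.0) \
        + (lam / 2.0) * (J - alpha) ** 2 \
        - (mu / 2.0) * torch.log(I_C + 1.0)
```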
| 373 |
+
|
| 374 |
+
Deformable Thin-Shells: To model thin-shells such as clothing, we use constant strain triangular elements embedded in 3D. The Neo-Hookean constitutive model above is applied to model in-plane elastic deformation, with the addition of a bending force $\mathbf{f}_b(\mathbf{s},\theta) = k_b\sin (\frac{\phi}{2} +\alpha)\mathbf{d}$, where $k_{b}$ is the bending stiffness, $\phi$ is the dihedral angle between two triangular faces, $\alpha$ is a per-edge actuation value that allows the mesh to flex inwards or outwards, and $\mathbf{d}$ is the force direction given by Bridson
|
| 375 |
+
|
| 376 |
+
et al. (2005). We also include a lift/drag model that approximates the effect of the surrounding air on the surface of the mesh.
|
| 377 |
+
|
| 378 |
+
Rigid Bodies: We represent the state of a 3D rigid body as $\mathbf{q}_b = [\mathbf{x},\mathbf{r}]$ consisting of a position $\mathbf{x}\in \mathbb{R}^3$ , and a quaternion $\mathbf{r}\in \mathbb{R}^4$ . The generalized velocity of a body is $\mathbf{u}_b = [\mathbf{v},\omega ]$ and the dynamics of each body is given by the Newton-Euler equations,
|
| 379 |
+
|
| 380 |
+
$$
|
| 381 |
+
\left[\begin{array}{ll} m & \mathbf{0} \\ \mathbf{0} & \mathbf{I} \end{array}\right] \left[\begin{array}{l} \dot{\mathbf{v}} \\ \dot{\omega} \end{array}\right] = \left[\begin{array}{l} \mathbf{f} \\ \tau \end{array}\right] - \left[\begin{array}{l} \mathbf{0} \\ \omega \times \mathbf{I}\omega \end{array}\right] \tag{2}
|
| 382 |
+
$$
|
| 383 |
+
|
| 384 |
+
where the mass $m$ and inertia matrix $\mathbf{I}$ (expressed at the center of mass) are considered design parameters $\theta$ .
|
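One semi-implicit Euler step of Eq. (2) may look as follows; the quaternion update of $\mathbf{r}$ is omitted for brevity and all names are illustrative:

```python
import torch

def rigid_step(x, v, w, f, tau, m, I, dt):
    # Linear part: m * dv/dt = f.
    v_new = v + dt * f / m
    x_new = x + dt * v_new
    # Angular part: I * dw/dt = tau - w x (I w)  (gyroscopic term).
    w_new = w + dt * torch.linalg.solve(I, tau - torch.linalg.cross(w, I @ w))
    return x_new, v_new, w_new
```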
| 385 |
+
|
| 386 |
+
Contact: We adopt a compliant contact model that associates elastic and damping forces with each nodal contact point. The model is parameterized by four scalars $\theta = [k_e, k_d, k_f, \mu]$, corresponding to elastic stiffness, damping, frictional stiffness, and friction coefficient, respectively. To prevent interpenetration we use a proportional penalty-based force, $\mathbf{f}_n(\mathbf{s}, \theta) = -\mathbf{n}[k_e C(\mathbf{q}) + k_d \dot{C}(\mathbf{u})]$, where $\mathbf{n}$ is a contact normal, and $C$ is a gap function measuring overlap, projected onto $\mathbb{R}^+$. We model friction using a relaxed Coulomb model Todorov (2014), $\mathbf{f}_f(\mathbf{s}, \theta) = -\mathbf{D}[\min(\mu | \mathbf{f}_n|, k_f \mathbf{u}_s)]$, where $\mathbf{D}$ is a basis of the contact plane, and $\mathbf{u}_s = \mathbf{D}^T \mathbf{u}$ is the sliding velocity at the contact point. While these forces are only $C^0$ continuous, we found this sufficient for optimization over a variety of objectives.
|
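Transcribed for a single contact point, the combined normal and friction force reads as follows (a sketch; the normal $\mathbf{n}$, plane basis $\mathbf{D}$, and gap values are assumed given):

```python
import torch

def contact_force(n, D, C, C_dot, u, ke, kd, kf, mu):
    # Penalty force along the contact normal: f_n = -n (ke * C + kd * Cdot).
    f_n = -n * (ke * C + kd * C_dot)
    # Relaxed Coulomb friction: a stiff viscous force in the contact plane,
    # clamped by the friction cone mu * |f_n| (the symmetric hinge of Fig. 9b).
    u_s = D.T @ u                      # sliding velocity in the contact plane
    bound = mu * f_n.norm()
    f_f = -D @ torch.clamp(kf * u_s, min=-bound, max=bound)
    return f_n + f_f
```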
| 387 |
+
|
| 388 |
+
More physical simulations: We also implement a number of other differentiable simulations such as pendula, mass-springs, and incompressible fluids Stam (1999). We note these systems have already been demonstrated in prior art, and thus focus on the more challenging systems in our paper.
|
| 389 |
+
|
| 390 |
+
# B DISCRETE ADJOINT METHOD
|
| 391 |
+
|
| 392 |
+
Above, we presented a formulation of time-integration using the discrete adjoint method that represents an arbitrary time-stepping scheme through the implicit relation,
|
| 393 |
+
|
| 394 |
+
$$
|
| 395 |
+
\mathbf{g}\left(\mathbf{s}^{-}, \mathbf{s}^{+}, \theta\right) = \mathbf{0}. \tag{3}
|
| 396 |
+
$$
|
| 397 |
+
|
| 398 |
+
This formulation is general enough to represent both explicit and implicit time-stepping methods. While explicit methods are often simple to implement, they may require extremely small time-steps for stability, which is problematic for reverse-mode automatic differentiation frameworks that must explicitly store the input state for each discrete timestep invocation of the integration routine. On the other hand, implicit methods can introduce computational overhead or unwanted numerical dissipation Hairer et al. (2006). For this reason, many real-time physics engines employ a semi-implicit (symplectic) Euler integration scheme Erez et al. (2015), due to its ease of implementation and numerical stability in most meaningful scenarios (it conserves energy for systems where the Hamiltonian is time-invariant).
|
| 399 |
+
|
| 400 |
+
We now give a concrete example of the discrete adjoint method applied to semi-implicit Euler. For the state variables defined above, the integration step may be written as follows,
|
| 401 |
+
|
| 402 |
+
$$
|
| 403 |
+
\mathbf{g}\left(\mathbf{s}^{-}, \mathbf{s}^{+}, \theta\right) = \left[\begin{array}{l} \mathbf{u}^{+} - \mathbf{u}^{-} - \Delta t \mathbf{M}^{-1}\mathbf{f}\left(\mathbf{s}^{-}\right) \\ \mathbf{q}^{+} - \mathbf{q}^{-} - \Delta t \mathbf{u}^{+} \end{array}\right] = \mathbf{0}. \tag{4}
|
| 404 |
+
$$
|
| 405 |
+
|
| 406 |
+
Note that in general, the mass matrix $\mathbf{M}$ is a function of $\mathbf{q}$ and $\theta$ . For conciseness we only consider the dependence on $\theta$ , although the overall procedure is unchanged in the general case. We provide a brief sketch of computing the gradients of $\mathbf{g}(\mathbf{s}^{-},\mathbf{s}^{+},\theta)$ . In the case of semi-implicit integration above, these are given by the following equations:
|
| 407 |
+
|
| 408 |
+
$$
|
| 409 |
+
\frac{\partial \mathbf{g}}{\partial \mathbf{s}^{-}} = \left[\begin{array}{cc} -\Delta t \mathbf{M}^{-1}\frac{\partial \mathbf{f}}{\partial \mathbf{q}(t)} & -\mathbf{I} - \Delta t \mathbf{M}^{-1}\frac{\partial \mathbf{f}}{\partial \mathbf{u}(t)} \\ -\mathbf{I} & 0 \end{array}\right] \quad \frac{\partial \mathbf{g}}{\partial \mathbf{s}^{+}} = \left[\begin{array}{cc} 0 & \mathbf{I} \\ \mathbf{I} & -\Delta t \mathbf{I} \end{array}\right] \quad \frac{\partial \mathbf{g}}{\partial \theta} = \left[\begin{array}{c} -\Delta t \frac{\partial \mathbf{M}^{-1}}{\partial \theta}\mathbf{f}\left(\mathbf{s}^{-}\right) \\ \mathbf{0} \end{array}\right]. \tag{5}
|
| 410 |
+
$$
|
| 411 |
+
|
| 412 |
+
In the case of semi-implicit Euler, the triangular structure of these Jacobians allows the adjoint variables to be computed explicitly. For fully implicit methods such as backward Euler, the Jacobians give rise to a linear system that must first be solved to obtain the adjoint variables.
|
| 413 |
+
|
| 414 |
+
# C PHYSICAL MODELS
|
| 415 |
+
|
| 416 |
+
We now undertake a more detailed discussion of the physical models implemented in $\nabla Sim$ .
|
| 417 |
+
|
| 418 |
+

|
| 419 |
+
(a) Triangular FEM element
|
| 420 |
+
|
| 421 |
+

|
| 422 |
+
(b) Tetrahedral FEM element
|
| 423 |
+
Figure 8: Mesh Discretization: We use triangular (a) and tetrahedral (b) FEM models with angle-based and volumetric activation parameters, $\alpha$ . These mesh-based discretizations are a natural fit for our differentiable rasterization pipeline, which is designed to operate on triangles.
|
| 424 |
+
|
| 425 |
+
# C.1 FINITE ELEMENT METHOD
|
| 426 |
+
|
| 427 |
+
As described in section 3.2 ("Physical models"), we use a hyperelastic constitutive model based on the stable neo-Hookean model of Smith et al. (2018a):
|
| 428 |
+
|
| 429 |
+
$$
|
| 430 |
+
\Psi(\mathbf{q}, \theta) = \frac{\mu}{2}\left(I_C - 3\right) + \frac{\lambda}{2}(J - \alpha)^2 - \frac{\mu}{2}\log\left(I_C + 1\right). \tag{6}
|
| 431 |
+
$$
|
| 432 |
+
|
| 433 |
+
The Lamé parameters, $\lambda, \mu$, control the element's resistance to shearing and volumetric strains. These may be specified on a per-element basis, allowing us to represent heterogeneous materials. In contrast to other work using particle-based models Hu et al. (2020), we adopt a mesh-based discretization for deformable shells and solids. For thin-shells, such as cloth, the surface is represented by a triangle mesh as in Figure 8a, enabling straightforward integration with our triangle-mesh-based differentiable rasterizer Liu et al. (2019); Chen et al. (2019). For solids, we use a tetrahedral FEM model as illustrated in Figure 8b. Both models include a per-element activation parameter $\alpha$, which, for thin-shells, allows us to control the relative dihedral angle between two connected faces. For tetrahedral meshes, it changes the element's volume, enabling locomotion, as in the control-fem example.
|
| 434 |
+
|
| 435 |
+
# C.2 CONTACT
|
| 436 |
+
|
| 437 |
+
Implicit contact methods based on linear complementarity formulations (LCP) of contact may be used to maintain hard non-penetration constraints de Avila Belbute-Peres et al. (2018). However, we found that the relaxed contact models used in typical physics engines Erez et al. (2015) were sufficient for our experiments. In this approach, contact forces are derived from a one-sided quadratic potential, giving rise to penalty forces of the form shown in Figure 9a. While Coulomb friction may also be modeled as an LCP, we use a relaxed model in which the stick regime is represented by a stiff quadratic potential around the origin, with a linear portion in the slip regime, as shown in Figure 9b. To generate contacts, we test each vertex of a mesh against a collision plane and introduce a contact within some distance threshold $d$.



(a)



(b)

Figure 9: Contact Model: To model non-interpenetration constraints, we use a relaxed model of contact that replaces a delta function with a linear hinge corresponding to a quadratic penalty energy (a). To model friction, we use a relaxed Coulomb model that replaces the step function with a symmetric hinge (b).

# C.3 PENDULA

We also implement simple and double pendula, as toy examples of well-behaved and chaotic systems respectively, and estimate the parameters of the system (i.e., the length(s) of the rod(s) and initial angular displacement(s)) by comparing the rendered videos (assuming uniformly random initial guesses) with the true videos. As pendula have been studied extensively in the context of differentiable physics simulation Degrave et al. (2016); de Avila Belbute-Peres et al. (2018); Cranmer et al. (2020b); Toth et al. (2020); Greydanus et al. (2019); Sanchez-Gonzalez et al. (2019), we focus on more challenging systems which have not been studied in prior work.

# C.4 INCOMPRESSIBLE FLUIDS

As an example of incompressible fluid simulation, we implement a smoke simulator following the popular semi-Lagrangian advection scheme of Stam (1999). At 2:20 in our supplementary video attachment, we show an experiment which optimizes the initial velocities of smoke particles to form a desired pattern. Similar schemes have already been realized differentiably, e.g. in DiffTaichi Hu et al. (2020) and autograd Maclaurin et al. (2015).

# D SOURCE-CODE TRANSFORMATION FOR AUTOMATIC DIFFERENTIATION

The discrete adjoint method requires computing gradients of physical quantities with respect to state and design parameters. To do so, we adopt a source-code transformation approach to perform reverse-mode automatic differentiation Hu et al. (2020); Margossian (2019). We use a domain-specific subset of the Python syntax extended with primitives for representing vectors, matrices, and quaternions. Each type includes functions for operating on it, along with the corresponding adjoint methods. An example simulation kernel is then defined as follows:

```python
@kernel
def integrateParticles(
    x : tensor(float3),
    v : tensor(float3),
    f : tensor(float3),
    w : tensor(float3),
    gravity : tensor(float3),
    dt : float,
    x_new : tensor(float3),
    v_new : tensor(float3)
):
    # Get thread ID
    thread_id = tid()

    # Load state variables and parameters
    x0 = load(x, thread_id)
    v0 = load(v, thread_id)
    f0 = load(f, thread_id)
    inv_mass = load(w, thread_id)

    # Load external forces
    g = load(gravity, 0)

    # Semi-implicit Euler
    v1 = v0 + (f0 * inv_mass - g * step(inv_mass)) * dt
    x1 = x0 + v1 * dt

    # Store results
    store(x_new, thread_id, x1)
    store(v_new, thread_id, v1)
```

Listing 1: Particle Integration Kernel
At runtime, the kernel's abstract syntax tree (AST) is parsed using Python's built-in `ast` module. We then generate C++ kernel code for forward and reverse mode, which may be compiled to a CPU or GPU executable using the PyTorch `torch.utils.cpp_extension` mechanism.
This approach allows writing imperative code, with fine-grained indexing and implicit operator fusion (since all operations in a kernel execute as one GPU kernel launch). Each kernel is wrapped as a PyTorch autograd operation so that it fits natively into the larger computational graph.
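
A minimal sketch of such a wrapper is shown below, assuming hypothetical handles `kernel_forward` and `kernel_backward` to the compiled forward and adjoint kernels; it is not the actual generated wrapper code.

```python
import torch

class IntegrateParticles(torch.autograd.Function):
    """Wraps a compiled simulation kernel as a differentiable PyTorch op."""

    @staticmethod
    def forward(ctx, x, v, f, w, gravity, dt):
        x_new, v_new = torch.empty_like(x), torch.empty_like(v)
        # Hypothetical handle to the generated forward kernel.
        kernel_forward(x, v, f, w, gravity, dt, x_new, v_new)
        ctx.save_for_backward(x, v, f, w, gravity)
        ctx.dt = dt
        return x_new, v_new

    @staticmethod
    def backward(ctx, grad_x_new, grad_v_new):
        x, v, f, w, gravity = ctx.saved_tensors
        # Hypothetical handle to the generated adjoint kernel, mapping
        # incoming output adjoints to input adjoints.
        grads = kernel_backward(x, v, f, w, gravity, ctx.dt,
                                grad_x_new, grad_v_new)
        return (*grads, None)  # no gradient for dt (an assumption here)
```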

# E MPC CONTROLLER ARCHITECTURE

For our model predictive control examples, we use a simple 3-layer neural network architecture, illustrated in Figure 10. With simulation time $t$ as input, we generate $N$ phase-shifted sinusoidal signals, which are passed to a fully-connected layer (zero-bias) and a final activation layer. The output is a vector of per-element activation values as described in the previous section.
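
A minimal sketch of this controller follows; the even phase spacing and the choice of tanh as the final activation are our own assumptions.

```python
import math
import torch

class PhaseController(torch.nn.Module):
    """Maps simulation time t to per-element activations via phase-shifted sinusoids."""

    def __init__(self, n_phases: int, n_elements: int, omega: float = 2.0 * math.pi):
        super().__init__()
        self.omega = omega
        # Evenly spaced phase offsets (an assumption; any fixed spacing works).
        self.register_buffer("phases", torch.linspace(0.0, 2.0 * math.pi, n_phases))
        self.fc = torch.nn.Linear(n_phases, n_elements, bias=False)  # zero-bias layer

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        signals = torch.sin(self.omega * t + self.phases)  # (n_phases,)
        return torch.tanh(self.fc(signals))                # per-element activations

controller = PhaseController(n_phases=8, n_elements=64)
alpha = controller(torch.tensor(0.5))  # activations at t = 0.5 s
```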



Figure 10: Our simple network architecture used for the control-walker and control-fem tasks.

# F LOSS LANDSCAPES FOR PARAMETER ESTIMATION OF DEFORMABLE SOLIDS

$\nabla Sim$ integrates several functional blocks, many of which contain nonlinear operations. Furthermore, we employ a pixelwise mean-squared error (MSE) loss function for estimating physical parameters from video. To assess whether the gradients obtained from $\nabla Sim$ are relevant for the task of physical parameter estimation, in Figure 2 of the main paper we present an analysis of the MSE loss landscape for mass estimation.

# F.1 ELASTICITY PARAMETER

We now present a similar analysis for elasticity parameter estimation in deformable solids. Figure 11a shows the loss landscape when optimizing for the Lamé parameters of a deformable solid FEM. In this case, both parameters $\lambda$ and $\mu$ are set to 1000. As can be seen in the plot, the loss landscape has a unique, dominant minimum at 1000. We believe the well-behaved nature of our loss landscape is a key contributing factor to the precise physical-parameter estimation ability of $\nabla Sim$.

# F.2 LOSS LANDSCAPE IN PYBULLET (REINFORCE)

Figure 11b shows how optimization using REINFORCE can introduce complications. As the simulation becomes unstable with masses close to zero, a poor local optimum can arise near the mean of the currently estimated mass. This illustrates that optimization through REINFORCE is only possible after careful tuning of the step size, sampling noise, and sampling range, which reduces the utility of the method in realistic settings where these hyperparameters are not known a priori.



(a) Lamé loss landscape



(b) PyBullet loss landscape

Figure 11: Loss Landscapes: (left) when optimizing for the elasticity parameters of a deformable FEM solid. Both Lamé parameters $\lambda$ and $\mu$ are set to 1000, where the MSE loss has a unique, dominant minimum. (right) when optimizing for the mass, the reward (negative normalized MSE) has a maximum close to the ground-truth mass, but the negative log-likelihood of each mass sample, which multiplies the reward, exhibits only a local minimum that is sensitive to the center of the current mass estimate.

# F.3 IMPACT OF THE LENGTH OF A VIDEO SEQUENCE

To assess the impact of the length of a video on the quality of our solution, we plot the loss landscapes for videos of varying lengths in Fig. 12. We find that shorter videos tend to have steeper loss landscapes compared to longer ones. The frame-rate also has an impact on the steepness of the landscape. In all cases, though, the loss landscape is smooth and has the same unique minimum.

# G DATASET DETAILS

For the rigid-body task of physical parameter estimation from video, we curated a dataset comprising 14 meshes, as shown in Fig. 13. The objects include a combination of primitive shapes, fruits and vegetables, animals, office objects, and airplanes. For each experiment, we select an object at random, and sample its physical attributes from a predefined range: densities from the range $[2, 12]$ $kg/m^3$, contact parameters $k_e, k_d, k_f$ from the range $[1, 500]$, and a coefficient of friction $\mu$ from the range $[0.2, 1.0]$. The positions, orientations, (anisotropic) scale factors, and initial velocities are sampled uniformly at random from a cube of side-length $13m$ centered on the camera. Across all rigid-body experiments, we use 800 objects for training and 200 objects for testing.

# H BASELINES

In this section, we present implementation details of the baselines used in our experiments.

# H.1 PYBULLET + REINFORCE

To explore whether existing non-differentiable simulators can be employed for physical parameter estimation, we take PyBullet Coumans & Bai (2016-2019), a popular physics engine, and make it trivially differentiable via gradient estimation. We employ the REINFORCE Williams (1992)



Figure 12: Impact of the length of a video sequence on the loss landscape. Notice how the loss landscape is much steeper for shorter videos (e.g., the MSE of the first and last frames). Nonetheless, all cases have a smooth loss landscape with the same unique minimum.



Figure 13: Objects used in our rigid-body experiments. All of these meshes have been simplified to contain 250 or fewer vertices, for faster collision detection times.
technique to acquire an approximate gradient through the otherwise non-differentiable environment. The implementation was inspired by Wu et al. (2015) and Rezende et al. (2016). In concurrent work, a similar idea was explored in Ehsani et al. (2020).
In PyBullet, the mass parameter of the object is randomly initialized in the range $[0, N_v]$, where $N_v$ is the number of vertices, the object is set to the same starting position and orientation as in the dataset, and the camera parameters are identical to those used in the dataset. This configuration ensures that if the mass were correct, the video frames rendered out by PyBullet would perfectly align with those generated by $\nabla Sim$. Each episode is rolled out for the same duration as in the dataset (60 frames, corresponding to 2 seconds of motion). In PyBullet this is achieved by running the simulation at $240\mathrm{Hz}$ and skipping 7 frames between observations. The REINFORCE reward is calculated by summing the individual $L2$ losses between ground truth frames and PyBullet frames, then multiplying each by $-1$ to establish a global maximum at the correct mass, in contrast with a global minimum as in $\nabla Sim$. When all individual frame rewards have been calculated, all trajectory rewards are normalized before calculating the loss. This ensures that the reward is scaled correctly with respect to REINFORCE's negative sample log-likelihood, but when the mass value approaches the local optimum, this leads to instability in the optimization process. To mitigate this instability, we introduce reward decay, a hyperparameter that slowly decreases the reward values as optimization progresses, in a similar manner to learning rate decay. Before each optimization step, all normalized frame reward values are multiplied by `reward_decay`. After the optimization step, the decay is updated by `reward_decay = reward_decay * decay_factor`. The hyperparameters used in this baseline can be found in Table 5.
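
A minimal sketch of this estimator is given below; `rollout_mse(mass)` stands in for rolling out PyBullet and computing the summed per-frame MSE against the ground-truth video, and while the hyperparameter names mirror Table 5, the code itself is an illustrative assumption rather than our exact baseline implementation.

```python
import torch

def reinforce_mass_estimate(rollout_mse, mass_init=5.0, no_samples=5,
                            optimization_steps=125, sample_noise=0.05,
                            decay_factor=0.925, lr=0.05):
    mean_mass = torch.tensor(mass_init, requires_grad=True)
    optimizer = torch.optim.Adam([mean_mass], lr=lr)
    reward_decay = 1.0
    for _ in range(optimization_steps):
        optimizer.zero_grad()
        dist = torch.distributions.Normal(mean_mass, sample_noise)
        masses = dist.sample((no_samples,))             # sampled mass candidates
        # Negative MSE so the correct mass is a reward maximum.
        rewards = torch.tensor([-rollout_mse(float(m)) for m in masses])
        rewards = (rewards - rewards.mean()) / (rewards.std() + 1e-8)  # normalize
        rewards = rewards * reward_decay                # apply reward decay
        # REINFORCE: weight each sample's negative log-likelihood by its reward.
        loss = -(dist.log_prob(masses) * rewards).mean()
        loss.backward()
        optimizer.step()
        reward_decay *= decay_factor                    # update the decay
    return mean_mass.item()
```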

<table><tr><td>Parameter</td><td>Value</td><td>Meaning</td></tr><tr><td>no_samples</td><td>5</td><td>Number of mass samples drawn at each optimization step</td></tr><tr><td>optimization_steps</td><td>125</td><td>Total number of optimization steps</td></tr><tr><td>sample_noise</td><td>0.05</td><td>Std. dev. of the normal distribution that the mass is sampled from</td></tr><tr><td>decay_factor</td><td>0.925</td><td>Factor the reward decay is multiplied by after each optimizer step</td></tr><tr><td>dataset_size</td><td>200</td><td>Number of bodies that the method was evaluated on</td></tr></table>

Table 5: PyBullet-REINFORCE hyperparameters.

# H.2 CNN FOR DIRECT PARAMETER ESTIMATION

In the rigid-body parameter estimation experiments, we train a ConvNet baseline, building on the EfficientNet-B0 architecture Tan & Le (2019). The ConvNet consists of two convolutional layers with parameters (PyTorch convention): (1280, 128, 1), (128, 32, 1), followed by linear layers and ReLU activations with sizes [7680, 1024, 100, 100, 100, 5]. No activation is applied over the output of the ConvNet. We train the model to minimize the mean-squared error between the estimated and the true parameters, and use the Adam optimizer Kingma & Ba (2015) with a learning rate of 0.0001. Each model was trained for 100 epochs on a V100 GPU. The input image frames were preprocessed by resizing them to $64 \times 64$ pixels (to reduce GPU memory consumption), and features were extracted with a pretrained EfficientNet-B0.
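
A minimal sketch of this head follows. The layer widths are those listed above; the assumption that per-frame EfficientNet-B0 features are concatenated before the linear layers (so that 60 frames × 32 × 2 × 2 = 7680) is ours, made only so the stated flattened size works out.

```python
import torch
import torch.nn as nn

conv_head = nn.Sequential(          # (1280, 128, 1) and (128, 32, 1) conv layers
    nn.Conv2d(1280, 128, 1), nn.ReLU(),
    nn.Conv2d(128, 32, 1), nn.ReLU(),
)
mlp = nn.Sequential(                # linear sizes [7680, 1024, 100, 100, 100, 5]
    nn.Linear(7680, 1024), nn.ReLU(),
    nn.Linear(1024, 100), nn.ReLU(),
    nn.Linear(100, 100), nn.ReLU(),
    nn.Linear(100, 100), nn.ReLU(),
    nn.Linear(100, 5),              # no activation over the output
)

feats = torch.randn(1, 60, 1280, 2, 2)   # hypothetical per-frame EfficientNet-B0 features
x = conv_head(feats.flatten(0, 1))       # (60, 32, 2, 2)
params = mlp(x.flatten().unsqueeze(0))   # (1, 5) estimated physical parameters
```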

# I COMPUTE AND TIMING DETAILS

Most of the models presented in $\nabla Sim$ can be trained and evaluated on modern laptops equipped with graphics processing units (GPUs). We find that, on a laptop with an Intel i7 processor and a GeForce GTX 1060 GPU, parameter estimation experiments for rigid/nonrigid bodies can be run in 5 to 20 minutes per object on the CPU and in under 1 minute on the GPU. The visuomotor control experiments (control-fem, control-cloth) take about 30 minutes per episode on the CPU and under 5 minutes per episode on the GPU.

# J OVERVIEW OF AVAILABLE DIFFERENTIABLE SIMULATIONS

Table 6 presents an overview of the differentiable simulations implemented in $\nabla Sim$ , and the optimizable parameters therein.
<table><tr><td></td><td>pos</td><td>vel</td><td>mass</td><td>rot</td><td>rest</td><td>stiff</td><td>damp</td><td>actuation</td><td>g</td><td>μ</td><td>e</td><td>ext forces</td></tr><tr><td>Rigid body</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td></td></tr><tr><td>Simple pendulum</td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td>✓</td></tr><tr><td>Double pendulum</td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td>✓</td></tr><tr><td>Deformable object</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td></tr><tr><td>Cloth</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td></td><td></td></tr><tr><td>Fluid (Smoke) (2D)</td><td></td><td>✓</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table>
Table 6: An overview of **optimizable parameters in ∇Sim**. Table columns are (in order, from left to right): Initial particle positions (pos), Initial particle velocities (vel), Per-particle mass (mass), Initial object orientation (rot), Spring rest lengths (rest), Spring stiffnesses (stiff), Spring damping coefficients (damp), Actuation parameters (actuation), Gravity (g), Friction parameters (μ), Elasticity parameters (e), External force parameters (ext forces).

# K LIMITATIONS

While providing a wide range of previously inaccessible capabilities, $\nabla Sim$ has a few limitations that we discuss in this section. These shortcomings also form interesting avenues for subsequent research.
- $\nabla Sim$ (and equivalently $\nabla PyBullet$) is inept at handling tiny masses ($100g$ and less). Optimizing for the physical parameters of such objects requires a closer look at the design of the physics engine and, possibly, its numerical stability.
- Articulated bodies are not currently implemented in $\nabla Sim$ . Typically, articulated bodies are composed of multiple prismatic joints which lend additional degrees of freedom to the system.
- While capable of modeling contacts with simple geometries (such as between arbitrary triangle meshes and planar surfaces), $\nabla Sim$ has limited capability to handle contact-rich motion that introduces a large number of discontinuities. One way to handle contacts differentiably could be to employ more sophisticated contact detection techniques and solve a linear complementarity problem (LCP) at each step, as done in de Avila Belbute-Peres et al. (2018).
- Aside from the aforementioned drawbacks, we note that physics engines are adept at modeling phenomena which can be codified. However, there are several unmodeled physical phenomena that occur in real-world videos which must be studied in order for $\nabla Sim$ to evolve into a scalable framework capable of operating in the wild.

# L BROADER IMPACT

Much progress has been made on end-to-end learning in visual domains. If successful, image and video understanding promises far-reaching applications from safer autonomous vehicles to more realistic computer graphics, but relying on these tools for planning and control poses substantial risk.
Neural information processing systems have shown experimentally promising results on visuomotor tasks, yet fail in unpredictable and unintuitive ways when deployed in real-world applications. If embodied learning agents are to play a broader role in the physical world, they must be held to a higher standard of interpretability. Establishing trust requires not just empirical, but explanatory evidence in the form of physically grounded models.
Our work provides a bridge between gradient- and model-based optimization. Explicitly modeling visual dynamics using well-understood physical principles has important advantages for human explainability and debuggability.
Unlike end-to-end neural architectures which distribute bias across a large set of parameters, $\nabla$ Sim trades their flexibility for physical interpretability. This does not eliminate the risk of bias in simulation, but allows us to isolate bias to physically grounded variables. Where discrepancy occurs, users can probe the model to obtain end-to-end gradients with respect to variation in physical orientation and material properties, or pixelwise differences. Differentiable simulators like $\nabla$ Sim afford a number of opportunities for use and abuse. We envision the following scenarios.
- A technician could query a trained model, "What physical parameters is the steering controller most sensitive to?", or "What happens if friction were slightly lower on that stretch of roadway?"
- An energy-conscious organization could use $\nabla \mathrm{Sim}$ to accelerate convergence of reinforcement learning models, reducing the energy consumption required for training.
- Using differentiable simulation, an adversary could efficiently construct a physically plausible scene causing the model to produce an incorrect prediction or take an unsafe action.
Video understanding is a world-building exercise with inherent modeling bias. Using physically well-studied models makes those modeling choices explicit; however, mitigating the risk of bias still requires active human participation in the modeling process. While a growing number of physically-based rendering and animation efforts are currently underway, our approach does require a high upfront engineering cost in simulation infrastructure. To operationalize these tools, we anticipate practitioners will need to devote significant effort to identifying and replicating unmodeled dynamics from real-world trajectories. Differentiable simulation offers a computationally tractable and physically interpretable pathway for doing so, allowing users to estimate physical trajectories and the properties which govern them.

gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e42853f2cc02d843ffe57077992c814908baff42d6ec27a74b9fd908495a6063
size 523103
gradsimdifferentiablesimulationforsystemidentificationandvisuomotorcontrol/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8594d0e52821ad9afba3754b749719f0bb9dbc34854602a72bff771c985af4cf
size 793428
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55c28556069fd9cc33bf48cfbf0ddd0786b554200080f6068458dfc60e11cd13
size 119037
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:44b8d1f8bc9b590971cd6e03cbd9a312bda0384877fb5b782e27b74c446eb4c5
size 147526
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/2a76e7f9-4c61-476b-980a-e683c4837d2f_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3d915b78372e7d9b39000f173dd0c7690f2668c74667cb35c5bcf3fce088c9a3
size 864952
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/full.md
ADDED
@@ -0,0 +1,512 @@

# NOT-MIWAE: DEEP GENERATIVE MODELLING WITH MISSING NOT AT RANDOM DATA
Niels Bruun Ipsen*
nbip@dtu.dk
Pierre-Alexandre Mattei†‡
pierre-alexandre.mattei@inria.fr
Jes Frellsen*†
jefr@dtu.dk

# ABSTRACT

When a missing process depends on the missing values themselves, it needs to be explicitly modelled and taken into account while doing likelihood-based inference. We present an approach for building and fitting deep latent variable models (DLVMs) in cases where the missing process is dependent on the missing data. Specifically, a deep neural network enables us to flexibly model the conditional distribution of the missingness pattern given the data. This allows for incorporating prior information about the type of missingness (e.g. self-censoring) into the model. Our inference technique, based on importance-weighted variational inference, involves maximising a lower bound of the joint likelihood. Stochastic gradients of the bound are obtained by using the reparameterisation trick both in latent space and data space. We show on various kinds of data sets and missingness patterns that explicitly modelling the missing process can be invaluable.

# 1 INTRODUCTION

Missing data often constitute systemic issues in real-world data analysis, and can be an integral part of some fields, e.g. recommender systems. This requires the analyst to take action by either using methods and models that are applicable to incomplete data or by performing imputations of the missing data before applying models requiring complete data. The expected model performance (often measured in terms of imputation error or innocuity of missingness on the inference results) depends on the assumptions made about the missing mechanism and how well those assumptions match the true missing mechanism. In a seminal paper, Rubin (1976) introduced a formal probabilistic framework to assess missing mechanism assumptions and their consequences. The most commonly used assumption, either implicitly or explicitly, is that a part of the data is missing at random (MAR). Essentially, the MAR assumption means that the missing pattern does not depend on the missing values. This makes it possible to ignore the missing data mechanism in likelihood-based inference by marginalizing over the missing data. The often implicit assumption made in non-probabilistic models and ad-hoc methods is that the data are missing completely at random (MCAR). MCAR is a stronger assumption than MAR, and informally it means that both observed and missing data do not depend on the missing pattern. More details on these assumptions can be found in the monograph of Little & Rubin (2002); of particular interest are also the recent revisits of Seaman et al. (2013) and Doretti et al. (2018). In this paper, our goal is to posit statistical models that leverage deep learning in order to break away from these assumptions. Specifically, we propose a general



Figure 1: (a) Graphical model of the not-MIWAE. (b) Gaussian data with MNAR values. Dots are fully observed, partially observed data are displayed as black crosses. A contour of the true distribution is shown together with directions found by PPCA and the not-MIWAE with a PPCA decoder.
recipe for dealing with cases where there is prior information about the distribution of the missing pattern given the data (e.g. self-censoring).
The MAR and MCAR assumptions are violated when the missing data mechanism depends on the missing data themselves. This setting is called missing not at random (MNAR). Here the missing mechanism cannot be ignored; doing so will lead to biased parameter estimates. This setting generally requires a joint model for the data and the missing mechanism.
Deep latent variable models (DLVMs, Kingma & Welling, 2013; Rezende et al., 2014) have recently been used for inference and imputation in missing data problems (Nazabal et al., 2020; Ma et al., 2018; 2019; Ivanov et al., 2019; Mattei & Frellsen, 2019). This led to impressive empirical results in the MAR and MCAR case, in particular for high-dimensional data.

# 1.1 CONTRIBUTIONS

We introduce the not-missing-at-random importance-weighted autoencoder (not-MIWAE) which allows for the application of DLVMs to missing data problems where the missing mechanism is MNAR. This is inspired by the missing data importance-weighted autoencoder (MIWAE, Mattei & Frellsen, 2019), a framework to train DLVMs in MAR scenarios, based itself on the importance-weighted autoencoder (IWAE) of Burda et al. (2016). The general graphical model for the not-MIWAE is shown in figure 1a. The first part of the model is simply a latent variable model: there is a stochastic mapping parameterized by $\theta$ from a latent variable $z\sim p(z)$ to the data $x\sim p_{\theta}(x|z)$ and the data may be partially observed. The second part of the model, which we call the missing model, is a stochastic mapping from the data to the missing mask $s\sim p_{\phi}(s|x)$ . Explicit specification of the missing model $p_{\phi}(s|x)$ makes it possible to address MNAR issues.
The model can be trained efficiently by maximising a lower bound of the joint likelihood (of the observed features and missing pattern) obtained via importance weighted variational inference (Burda et al., 2016). A key difference with the MIWAE is that we use the reparameterization trick in the data space, as well as in the code space, in order to get stochastic gradients of the lower bound.
Missing processes affect data analysis in a wide range of domains and often the MAR assumption does not hold. We apply our method to censoring in datasets from the UCI database, clipping in images and the issue of selection bias in recommender systems.

# 2 BACKGROUND

Assume that the complete data are stored within a data matrix $\mathbf{X} = (\pmb{x}_1, \dots, \pmb{x}_n)^\top \in \mathcal{X}^n$ that contains $n$ i.i.d. copies of the random variable $\pmb{x} \in \mathcal{X}$, where $\mathcal{X} = \mathcal{X}_1 \times \dots \times \mathcal{X}_p$ is a $p$-dimensional feature space. For simplicity, $x_{ij}$ refers to the $j$'th feature of $\pmb{x}_i$, and $x_i$ refers to the $i$'th sample in the data matrix. Throughout the text, we will make statements about the random variable $\pmb{x}$, and only consider samples $x_i$ when necessary. In a missing data context, each sample can be split into an observed part and a missing part, $x_i = (x_i^{\mathrm{o}}, x_i^{\mathrm{m}})$. The pattern of missingness is individual to each copy of $x$ and described by a corresponding mask random variable $s \in \{0, 1\}^p$. This leads to a mask matrix $\mathbf{S} = (s_1, \dots, s_n)^\top \in \{0, 1\}^{n \times p}$ verifying $s_{ij} = 1$ if $x_{ij}$ is observed and $s_{ij} = 0$ if $x_{ij}$ is missing.
We wish to construct a parametric model $p_{\theta, \phi}(\boldsymbol{x}, \boldsymbol{s})$ for the joint distribution of a single data point $\boldsymbol{x}$ and its mask $\boldsymbol{s}$ , which can be factored as

$$
p _ {\theta , \phi} (\boldsymbol {x}, \boldsymbol {s}) = p _ {\theta} (\boldsymbol {x}) p _ {\phi} (\boldsymbol {s} | \boldsymbol {x}). \tag {1}
$$

Here $p_{\phi}(s|\boldsymbol{x}) = p_{\phi}(s|\boldsymbol{x}^{\mathrm{o}}, \boldsymbol{x}^{\mathrm{m}})$ is the conditional distribution of the mask, which may depend on both the observed and missing data, through its own parameters $\phi$ . The three assumptions from the framework of Little & Rubin (2002) (see also Ghahramani & Jordan, 1995) pertain to the specific form of this conditional distribution:

- MCAR: $p_{\phi}(\boldsymbol{s}|\boldsymbol{x}) = p_{\phi}(\boldsymbol{s})$,
- MAR: $p_{\phi}(\boldsymbol{s}|\boldsymbol{x}) = p_{\phi}(\boldsymbol{s}|\boldsymbol{x}^{\mathrm{o}})$,
- MNAR: $p_{\phi}(\boldsymbol{s}|\boldsymbol{x})$ may depend on both $\boldsymbol{x}^{\mathrm{o}}$ and $\boldsymbol{x}^{\mathrm{m}}$.
To maximize the likelihood of the parameters $(\theta, \phi)$ , based only on observed quantities, the missing data is integrated out from the joint distribution

$$
p _ {\theta , \phi} \left(\boldsymbol {x} ^ {\mathrm {o}}, \boldsymbol {s}\right) = \int p _ {\theta} \left(\boldsymbol {x} ^ {\mathrm {o}}, \boldsymbol {x} ^ {\mathrm {m}}\right) p _ {\phi} \left(\boldsymbol {s} \mid \boldsymbol {x} ^ {\mathrm {o}}, \boldsymbol {x} ^ {\mathrm {m}}\right) \mathrm {d} \boldsymbol {x} ^ {\mathrm {m}}. \tag {2}
$$

In both the MCAR and MAR cases, inference for $\theta$ using the full likelihood becomes proportional to $p_{\theta ,\phi}(\boldsymbol{x}^{\mathrm{o}},\boldsymbol{s})\propto p_{\theta}(\boldsymbol{x}^{\mathrm{o}})$ , and the missing mechanism can be ignored while focusing only on $p_{\theta}(\boldsymbol{x}^{\mathrm{o}})$ . In the MNAR case, the missing mechanism can depend on both observed and missing data, offering no factorization of the likelihood in equation (2). The parameters of the data generating process and the parameters of the missing data mechanism are tied together by the missing data.

# 2.1 PPCA EXAMPLE

A linear DLVM with isotropic noise variance can be used to recover a model similar to probabilistic principal component analysis (PPCA, Roweis, 1998; Tipping & Bishop, 1999). In figure 1b, a dataset affected by an MNAR missing process is shown together with two fitted PPCA models: regular PPCA and the not-MIWAE formulated as a PPCA-like model. Data is generated from a multivariate normal distribution, and an MNAR missing process is imposed by setting the horizontal coordinate to missing when it is larger than its mean, i.e. it becomes missing because of the value it would have had, had it been observed. Regular PPCA for missing data assumes that the missing mechanism is MAR, so that the missing process is ignorable. This introduces a bias, both in the estimated mean and in the estimated principal signal direction of the data. The not-MIWAE PPCA assumes the missing mechanism is MNAR, so the data generating process and missing data mechanism are modelled jointly as described in equation (2).

# 2.2 PREVIOUS WORK

Rubin (1976) introduced and formalized the conditions for the appropriateness of ignoring the missing process when doing likelihood-based or Bayesian inference. The introduction of the EM algorithm (Dempster et al., 1977) made it feasible to obtain maximum likelihood estimates in many missing data settings, see e.g. Ghahramani & Jordan (1994; 1995); Little & Rubin (2002). Sampling methods such as Markov chain Monte Carlo have made it possible to sample a target posterior in Bayesian models, including the missing data, so that parameter marginal distributions and missing data marginal distributions are available directly (Gelman et al., 2013). This is also the starting point of the multiple imputations framework of Rubin (1977; 1996). Here the samples of the missing data are used to provide several realisations of complete datasets, where complete-data methods can be applied to get combined mean and variability estimates.
The framework of Little & Rubin (2002) is instructive in how to handle MNAR problems, and a recent review of MNAR methods can be found in (Tang & Ju, 2018). Low-rank models were used for estimation and imputation in MNAR settings by Sportisse et al. (2020a). Two approaches were taken to fitting models: 1) maximising the joint distribution of data and missing mask using an EM algorithm, and 2) implicitly modelling the joint distribution by concatenating the data matrix and the missing mask and working with this new matrix. This implies a latent representation giving rise to both the data and the mask. An overview of estimation methods for PCA and PPCA with missing data was given by Ilin & Raiko (2010), while PPCA in the presence of an MNAR missing mechanism has been addressed by Sportisse et al. (2020b). There has been some focus on MNAR issues in the form of selection bias within the recommender system community (Marlin et al., 2007; Marlin & Zemel, 2009; Steck, 2013; Hernandez-Lobato et al., 2014; Schnabel et al., 2016; Wang et al., 2019), where the methods applied range from joint modelling of data and missing model using multinomial mixtures and matrix factorization to debiasing existing methods using propensity-based techniques from causality.
Deep latent variable models are intuitively appealing in a missing data context: the generative part of the model can be used to sample the missing part of an observation. This was already utilized by Rezende et al. (2014) to do imputation and denoising by sampling from a Markov chain whose stationary distribution is approximately the conditional distribution of the missing data given the observed. This procedure has been enhanced by Mattei & Frellsen (2018a) using Metropolis-within-Gibbs. In both cases the experiments assumed MAR, and a fitted model, based on complete data, was already available.
Approaches to fitting DLVMs in the presence of missing data have recently been suggested, such as the HI-VAE by Nazabal et al. (2020) using an extension of the variational autoencoder (VAE) lower bound, the p-VAE by Ma et al. (2018; 2019) using the VAE lower bound and a permutation-invariant encoder, the MIWAE by Mattei & Frellsen (2019), extending the IWAE lower bound (Burda et al., 2016), and GAIN (Yoon et al., 2018) using GANs for missing data imputation. All of these approaches assume that the missing process is MAR or MCAR. In (Gong et al., 2020), the data and missing mask are modelled together, as both being generated by a mapping from the same latent space, thereby tying the data model and missing process together. This gives more flexibility in terms of missing process assumptions, akin to the matrix factorization approach by Sportisse et al. (2020a).
In concurrent work, Collier et al. (2020) have developed a deep generative model of the observed data conditioned on the mask random variable, and Lim et al. (2021) apply a model similar to the not-MIWAE to electronic health records data. In forthcoming work, Ghalebikesabi et al. (2021) propose a deep generative model for non-ignorable missingness building on ideas from VAEs and pattern-set mixture models.

# 3 INFERENCE IN DLVMS AFFECTED BY MNAR

In an MNAR setting, the parameters for the data generating process and the missing data mechanism need to be optimized jointly using all observed quantities. The relevant quantity to maximize is therefore the log-(joint) likelihood

$$
\ell (\theta , \phi) = \sum_ {i = 1} ^ {n} \log p _ {\theta , \phi} \left(\boldsymbol {x} _ {i} ^ {\mathrm {o}}, \boldsymbol {s} _ {i}\right), \tag {3}
$$

where we can rewrite the general contribution of data points $\log p_{\theta, \phi}(\pmb{x}^{\mathrm{o}}, \pmb{s})$ as

$$
\log \int p _ {\phi} (\boldsymbol {s} | \boldsymbol {x} ^ {\mathrm {o}}, \boldsymbol {x} ^ {\mathrm {m}}) p _ {\theta} (\boldsymbol {x} ^ {\mathrm {o}} | \boldsymbol {z}) p _ {\theta} (\boldsymbol {x} ^ {\mathrm {m}} | \boldsymbol {z}) p (\boldsymbol {z}) \mathrm {d} \boldsymbol {z} \mathrm {d} \boldsymbol {x} ^ {\mathrm {m}}, \tag {4}
$$

using the assumption that the observation model is fully factorized, $p_{\theta}(\pmb{x}|\pmb{z}) = \prod_j p_{\theta}(x_j|\pmb{z})$, which implies $p_{\theta}(\pmb{x}|\pmb{z}) = p_{\theta}(\pmb{x}^{\mathrm{o}}|\pmb{z})p_{\theta}(\pmb{x}^{\mathrm{m}}|\pmb{z})$. The integrals over missing and latent variables make direct maximum likelihood intractable. However, the approach of Burda et al. (2016), using an inference network and importance sampling to derive a more tractable lower bound of $\ell(\theta, \phi)$, can be used here as well. The key idea is to posit a conditional distribution $q_{\gamma}(\mathbf{z}|\pmb{x}^{\mathrm{o}})$ called the variational distribution that will play the role of a learnable proposal in an importance sampling scheme.
As in VAEs (Kingma & Welling, 2013; Rezende et al., 2014) and IWAEs (Burda et al., 2016), the distribution $q_{\gamma}(\mathbf{z}|\boldsymbol{x}^{\mathrm{o}})$ comes from a simple family (e.g. the Gaussian or Student's $t$ family) and its parameters are given by the output of a neural network (called inference network or encoder) that takes $\boldsymbol{x}^{\mathrm{o}}$ as input. The issue is that a neural net cannot readily deal with variable-length inputs (which is the case of $\boldsymbol{x}^{\mathrm{o}}$). This was tackled by several works: Nazabal et al. (2020) and Mattei & Frellsen (2019) advocated simply zero-imputing $\boldsymbol{x}^{\mathrm{o}}$ to get inputs with constant length, and Ma et al. (2018; 2019) used a permutation-invariant network able to deal with inputs of variable length.
Introducing the variational distribution, the contribution of a single observation is equal to

$$
\begin{aligned} \log p _ {\theta , \phi} \left(\boldsymbol {x} ^ {\mathrm {o}}, \boldsymbol {s}\right) &= \log \int \frac {p _ {\phi} \left(\boldsymbol {s} \mid \boldsymbol {x} ^ {\mathrm {o}} , \boldsymbol {x} ^ {\mathrm {m}}\right) p _ {\theta} \left(\boldsymbol {x} ^ {\mathrm {o}} \mid \boldsymbol {z}\right) p (\boldsymbol {z})}{q _ {\gamma} (\boldsymbol {z} \mid \boldsymbol {x} ^ {\mathrm {o}})} q _ {\gamma} (\boldsymbol {z} \mid \boldsymbol {x} ^ {\mathrm {o}}) p _ {\theta} \left(\boldsymbol {x} ^ {\mathrm {m}} \mid \boldsymbol {z}\right) \mathrm {d} \boldsymbol {x} ^ {\mathrm {m}} \, \mathrm {d} \boldsymbol {z} \quad (5) \\ &= \log \mathbb {E} _ {\boldsymbol {z} \sim q _ {\gamma} (\boldsymbol {z} \mid \boldsymbol {x} ^ {\mathrm {o}}), \, \boldsymbol {x} ^ {\mathrm {m}} \sim p _ {\theta} (\boldsymbol {x} ^ {\mathrm {m}} \mid \boldsymbol {z})} \left[ \frac {p _ {\phi} (\boldsymbol {s} \mid \boldsymbol {x} ^ {\mathrm {o}} , \boldsymbol {x} ^ {\mathrm {m}}) p _ {\theta} (\boldsymbol {x} ^ {\mathrm {o}} \mid \boldsymbol {z}) p (\boldsymbol {z})}{q _ {\gamma} (\boldsymbol {z} \mid \boldsymbol {x} ^ {\mathrm {o}})} \right]. \quad (6) \end{aligned}
$$

The main idea of importance-weighted variational inference and of the IWAE is to replace the expectation inside the logarithm by a Monte Carlo estimate of it (Burda et al., 2016). This leads to the objective function

$$
\mathcal {L} _ {K} (\theta , \phi , \gamma) = \sum_ {i = 1} ^ {n} \mathbb {E} \left[ \log \frac {1}{K} \sum_ {k = 1} ^ {K} w _ {k i} \right], \tag {7}
$$

where, for all $k \leq K$ and $i \leq n$,

$$
w _ {k i} = \frac {p _ {\phi} \left(\boldsymbol {s} _ {i} \mid \boldsymbol {x} _ {i} ^ {\mathrm {o}} , \boldsymbol {x} _ {k i} ^ {\mathrm {m}}\right) p _ {\theta} \left(\boldsymbol {x} _ {i} ^ {\mathrm {o}} \mid \boldsymbol {z} _ {k i}\right) p \left(\boldsymbol {z} _ {k i}\right)}{q _ {\gamma} \left(\boldsymbol {z} _ {k i} \mid \boldsymbol {x} _ {i} ^ {\mathrm {o}}\right)}, \tag {8}
$$

and $(z_{1i},x_{1i}^{\mathrm{m}}),\ldots ,(z_{Ki},x_{Ki}^{\mathrm{m}})$ are $K$ i.i.d. samples from $q_{\gamma}(z|x_i^{\mathrm{o}})p_\theta (x^{\mathrm{m}}|z)$, over which the expectation in equation (7) is taken. The unbiasedness of the Monte Carlo estimates ensures (via Jensen's inequality) that the objective is indeed a lower bound of the likelihood. Moreover, under the moment conditions of (Domke & Sheldon, 2018, Theorem 3), which we detail in Appendix D, it is possible to show that the sequence $(\mathcal{L}_K(\theta ,\phi ,\gamma))_{K\geq 1}$ converges monotonically (Burda et al., 2016, Theorem 1) to the likelihood:

$$
\mathcal {L} _ {1} (\theta , \phi , \gamma) \leq \dots \leq \mathcal {L} _ {K} (\theta , \phi , \gamma) \xrightarrow [ K \rightarrow \infty ]{} \ell (\theta , \phi). \tag {9}
$$

**Properties of the not-MIWAE objective** The bound $\mathcal{L}_K(\theta, \phi, \gamma)$ has essentially the same properties as the (M)IWAE bounds; see Mattei & Frellsen (2019, Section 2.4) for more details. The key difference is that we are integrating over both the latent space and part of the data space. This means that, to obtain unbiased estimates of gradients of the bound, we need to backpropagate through samples from $q_{\gamma}(z|\pmb{x}_i^{\mathrm{o}})p_{\theta}(\pmb{x}^{\mathrm{m}}|\pmb{z})$. A simple way to do this is to use the reparameterization trick both for $q_{\gamma}(z|\pmb{x}_i^{\mathrm{o}})$ and $p_{\theta}(\pmb{x}^{\mathrm{m}}|\pmb{z})$. This is the approach that we chose in our experiments. The main limitation is that $p_{\theta}(\pmb{x}|\pmb{z})$ has to belong to a reparameterizable family, like Gaussians or Student's $t$ distributions (see Figurnov et al., 2018 for a list of available distributions). If the distribution is not readily reparametrisable (e.g. if the data are discrete), several other options are available, see e.g. the review of Mohamed et al. (2020), and, in the discrete case, the continuous relaxations of Jang et al. (2017) and Maddison et al. (2017).
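
A minimal sketch of the resulting objective for a single zero-imputed observation follows, with Gaussian encoder and decoder so that both reparameterizations are standard; `encoder`, `decoder`, `miss_log_prob`, and `prior` are placeholder modules we introduce for illustration, not a prescribed implementation.

```python
import torch

def not_miwae_bound(x_obs, s, encoder, decoder, miss_log_prob, prior, K=20):
    """Monte Carlo estimate of L_K for one observation x_obs with mask s (1 = observed)."""
    q = encoder(x_obs * s)                         # Gaussian q_gamma(z | x^o), zero-imputed input
    z = q.rsample((K,))                            # reparameterized latent samples
    p_x = decoder(z)                               # factorized Gaussian p_theta(x | z)
    x_mis = p_x.rsample()                          # reparameterized missing-data samples
    x_mix = x_obs * s + x_mis * (1 - s)            # observed values + imputed missing values
    log_p_s = miss_log_prob(x_mix, s)              # log p_phi(s | x^o, x^m)
    log_p_xo = (p_x.log_prob(x_obs) * s).sum(-1)   # log p_theta(x^o | z)
    log_w = log_p_s + log_p_xo + prior.log_prob(z).sum(-1) - q.log_prob(z).sum(-1)
    return torch.logsumexp(log_w, dim=0) - torch.log(torch.tensor(float(K)))
```

Training then maximises the sum of this bound over all observations (or minimises its negation as a loss).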
**Imputation** When the model has been trained, it can be used to impute missing values. If our performance metric is a loss function $L(\pmb{x}^{\mathrm{m}}, \hat{\pmb{x}}^{\mathrm{m}})$, optimal imputations $\hat{\pmb{x}}^{\mathrm{m}}$ minimise $\mathbb{E}_{\pmb{x}^{\mathrm{m}}} [L(\pmb{x}^{\mathrm{m}}, \hat{\pmb{x}}^{\mathrm{m}})|\pmb{x}^{\mathrm{o}}, s]$. When $L$ is the squared error, the optimal imputation is the conditional mean, which can be estimated via self-normalised importance sampling (Mattei & Frellsen, 2019); see appendix B for more details.

# 3.1 USING PRIOR INFORMATION VIA THE MISSING DATA MODEL

The missing data mechanism can be fully known/decided upon in advance (so that the relationship $p_{\phi}(s|x)$ is fixed and no parameters need to be learned), its type can be known while its parameters need to be learnt, or it can be unknown both in terms of parameters and model. The more we know about the nature of the missing mechanism, the more information we can put into designing the missing model. This in turn helps inform the data model how its parameters should be modified so as to accommodate the missing model. This is in line with the findings of Molenberghs et al. (2008), who showed that, for MNAR modelling to work, one has to leverage prior knowledge about the missing process. A crucial issue is under what model assumptions the full data distribution can be recovered from an incomplete sample. Indeed, some general missing models may lead to inconsistent statistical estimation (see e.g. Mohan & Pearl, 2021; Nabi et al., 2020).
The missing model is essentially solving a classification problem; based on the observed data and the output from the data model filling in the missing data, it needs to improve its "accuracy" in predicting the mask. A Bernoulli distribution is used for the probability of the mask given both observed and missing data

$$
p_{\phi}(\boldsymbol{s} | \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{x}^{\mathrm{m}}) = p_{\phi}(\boldsymbol{s} | \boldsymbol{x}) = \operatorname{Bern}(\boldsymbol{s} | \pi_{\phi}(\boldsymbol{x})) = \prod_{j=1}^{p} \pi_{\phi,j}(\boldsymbol{x})^{s_j} (1 - \pi_{\phi,j}(\boldsymbol{x}))^{1 - s_j}. \tag{10}
$$

Here $\pi_j$ is the estimated probability of being observed for that particular observation for feature $j$ . The mapping $\pi_{\phi,j}(\pmb{x})$ from the data to the probability of being observed for the $j$ 'th feature can be as general or specific as needed. A simple example could be that of self-masking or self-censoring, where the probability of the $j$ 'th feature being observed is only dependent on the feature value, $x_j$ . Here the mapping can be a sigmoid on a linear mapping of the feature value, $\pi_{\phi,j}(\pmb{x}) = \sigma(ax_j + b)$ . The missing model can also be based on a group theoretic approach, see appendix C.
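
As a concrete illustration, a minimal self-masking missing model of this form, with learnable per-feature slopes and offsets (module and variable names are our own), could be written as:

```python
import torch

class SelfMasking(torch.nn.Module):
    """Per-feature logistic missing model: pi_j(x) = sigmoid(a_j * x_j + b_j)."""

    def __init__(self, n_features: int):
        super().__init__()
        self.a = torch.nn.Parameter(torch.zeros(n_features))
        self.b = torch.nn.Parameter(torch.zeros(n_features))

    def log_prob(self, x: torch.Tensor, s: torch.Tensor) -> torch.Tensor:
        # log Bern(s | pi_phi(x)) of eq. (10), summed over features; s is a float mask.
        logits = self.a * x + self.b
        return -torch.nn.functional.binary_cross_entropy_with_logits(
            logits, s, reduction="none").sum(-1)
```

An instance's `log_prob` can play the role of the `miss_log_prob` placeholder in the earlier sketch; constraining the sign of `a` would correspond to the "self-masking known" variant used in section 4.2.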

# 4 EXPERIMENTS

In this section we apply the not-MIWAE to problems with values MNAR: censoring in multivariate datasets, clipping in images, and selection bias in recommender systems. Implementation details and a link to source code can be found in appendix A.
<table><tr><td></td><td>Banknote</td><td>Concrete</td><td>Red</td><td>White</td><td>Yeast</td><td>Breast</td></tr><tr><td>PPCA</td><td>1.39 ± 0.00</td><td>1.61 ± 0.00</td><td>1.61 ± 0.00</td><td>1.57 ± 0.00</td><td>1.67 ± 0.00</td><td>0.90 ± 0.00</td></tr><tr><td>not-MIWAE - PPCA</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>agnostic</td><td>1.25 ± 0.15</td><td>1.47 ± 0.01</td><td>1.32 ± 0.00</td><td>1.27 ± 0.01</td><td>1.20 ± 0.05</td><td>0.78 ± 0.00</td></tr><tr><td>self-masking</td><td>0.57 ± 0.00</td><td>1.31 ± 0.00</td><td>1.13 ± 0.00</td><td>0.99 ± 0.00</td><td>0.78 ± 0.00</td><td>0.72 ± 0.00</td></tr><tr><td>self-masking known</td><td>0.57 ± 0.00</td><td>1.31 ± 0.00</td><td>1.13 ± 0.00</td><td>0.99 ± 0.00</td><td>0.77 ± 0.00</td><td>0.72 ± 0.00</td></tr><tr><td>MIWAE</td><td>1.19 ± 0.01</td><td>1.66 ± 0.01</td><td>1.62 ± 0.01</td><td>1.55 ± 0.01</td><td>1.72 ± 0.01</td><td>1.20 ± 0.01</td></tr><tr><td>not-MIWAE</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>agnostic</td><td>0.80 ± 0.08</td><td>2.63 ± 0.12</td><td>1.30 ± 0.01</td><td>1.37 ± 0.00</td><td>1.43 ± 0.02</td><td>1.10 ± 0.01</td></tr><tr><td>self-masking</td><td>1.88 ± 0.85</td><td>1.26 ± 0.02</td><td>1.08 ± 0.02</td><td>1.04 ± 0.01</td><td>1.48 ± 0.03</td><td>0.74 ± 0.01</td></tr><tr><td>self-masking known</td><td>0.74 ± 0.05</td><td>1.12 ± 0.04</td><td>1.07 ± 0.00</td><td>1.04 ± 0.00</td><td>1.38 ± 0.02</td><td>0.76 ± 0.01</td></tr><tr><td>low-rank joint model</td><td>0.79 ± 0.02</td><td>1.57 ± 0.01</td><td>1.42 ± 0.01</td><td>1.39 ± 0.01</td><td>1.19 ± 0.00</td><td>1.22 ± 0.01</td></tr><tr><td>missForest</td><td>1.28 ± 0.00</td><td>1.76 ± 0.01</td><td>1.64 ± 0.00</td><td>1.63 ± 0.00</td><td>1.66 ± 0.00</td><td>1.57 ± 0.00</td></tr><tr><td>MICE</td><td>1.41 ± 0.00</td><td>1.70 ± 0.00</td><td>1.68 ± 0.00</td><td>1.41 ± 0.00</td><td>1.72 ± 0.00</td><td>1.17 ± 0.00</td></tr><tr><td>mean</td><td>1.73 ± 0.00</td><td>1.85 ± 0.00</td><td>1.83 ± 0.00</td><td>1.74 ± 0.00</td><td>1.69 ± 0.00</td><td>1.82 ± 0.00</td></tr></table>

Table 1: Imputation RMSE on UCI datasets affected by MNAR.

# 4.1 EVALUATION METRICS

Model performance can be assessed using different metrics. A first metric is how well the marginal distribution of the data has been inferred. This can be assessed if we happen to have a fully observed test-set available. Indeed, we can look at the test log-likelihood of this fully observed test-set as a measure of how close $p_{\theta}(\pmb{x})$ and the true distribution of $\pmb{x}$ are. In the case of a DLVM, performance can be estimated using importance sampling with the variational distribution as proposal (Rezende et al., 2014). Since the encoder is tuned to observations with missing data, it should be retrained (while keeping the decoder fixed) as suggested by Mattei & Frellsen (2018b).
Another metric of interest is the imputation error. In experimental settings where the missing mechanism is under our control, we have access to the actual values of the missing data, and the imputation error can be found directly as an error measure between these and the reconstructions from the model. In real-world datasets affected by MNAR processes, we cannot use the usual approach of doing a train-test split of the observed data: as the test-set is biased by the same missing mechanism as the training-set, it is not representative of the full population. Here we need an MAR data sample to evaluate model performance (Marlin et al., 2007).

# 4.2 SINGLE IMPUTATION IN UCI DATA SETS AFFECTED BY MNAR

We compare different imputation techniques on datasets from the UCI database (Dua & Graff, 2017), where in an MCAR setting the MIWAE has shown state-of-the-art performance (Mattei & Frellsen, 2019). An MNAR missing process is introduced by self-masking in half of the features: when the feature value is higher than the feature mean, it is set to missing. The MIWAE and not-MIWAE, as well as their linear PPCA-like versions, are fitted to the data with missing values. For the not-MIWAE, three different approaches to the missing model are used: 1) agnostic, where the data model output is mapped to logits for the missing process via a single dense linear layer, 2) self-masking, where logistic regression is used for each feature, and 3) self-masking known, where the sign of the weights in the logistic regression is known.
We compare to the low-rank approximation of the concatenation of data and mask by Sportisse et al. (2020a), which implicitly models the data and mask jointly. Furthermore, we compare to mean imputation, missForest (Stekhoven & Buhlmann, 2012), and MICE (Buuren & Groothuis-Oudshoorn, 2010) using Bayesian Ridge regression. Similar settings are used for the MIWAE and not-MIWAE; see appendix A. Results over 5 runs are shown in table 1. Results for varying missing rates are in appendix E.
The low-rank joint model is almost always better than PPCA, missForest, MICE, and mean imputation, i.e. all M(C)AR approaches, which can be attributed to the implicit modelling of data and mask together. At the same time, the not-MIWAE PPCA is always better than the corresponding low-rank joint model, except for the agnostic missing model on the Yeast dataset. Supplying the missing model with more knowledge of the missing process (that it is self-masking, and the direction of the missing mechanism) improves performance. The not-MIWAE performance is also improved with more knowledge in the missing model. The agnostic missing process can give good performance, but is often led astray by an incorrectly learned missing model.



(a) MIWAE



(b) not-MIWAE



(c) missing data



Figure 2: SVHN: Histograms over imputed values for (a) the MIWAE and (b) the not-MIWAE, and (c) the pixel values of the missing data.

Figure 3: Rows from top: original images, images with missing data, not-MIWAE imputations, MIWAE imputations.
<table><tr><td>Model</td><td>RMSE</td><td>$\mathcal{L}_{10000}^{\text{test}}$</td></tr><tr><td>MIWAE</td><td>0.17298</td><td>1867.66</td></tr><tr><td>not-MIWAE</td><td>0.07294</td><td>1894.36</td></tr><tr><td>MIWAE no missing</td><td></td><td>1908.11</td></tr></table>
Table 2: SVHN: Imputation RMSE and test-set log-likelihood estimate. Constant imputation with 1's has an RMSE of 0.1757.
This speaks to the trade-off between data model flexibility and missing model flexibility. The not-MIWAE PPCA has a huge inductive bias in the data model, and so we can employ a more flexible missing model and still get good results. For the not-MIWAE, having both a flexible data model and a flexible missing model can be detrimental to performance. One way to assess the learnt missing processes is the mask classification accuracy on fully observed data. These accuracies are reported in table A1 and show that the accuracy increases as more information is put into the missing model.
# 4.3 CLIPPING IN SVHN IMAGES
We emulate the clipping phenomenon in images on the Street View House Numbers dataset (SVHN, Netzer et al., 2011). Here we introduce a self-masking missing mechanism that is identical for all pixels: the missing mask is Bernoulli sampled with probability
$$
\Pr\left(s_{ij} = 1 \mid x_{ij}\right) = \frac{1}{1 + e^{-\mathrm{logits}}}, \qquad \mathrm{logits} = W\left(x_{ij} - b\right), \tag{11}
$$
where $W = -50$ and $b = 0.75$. This mimics a clipping process where 0.75 is the clipping point (the data is converted to grayscale in the $[0, 1]$ range). For this experiment we use the true missing process as the missing model in the not-MIWAE.
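A minimal sketch of sampling this mask, with our own function and variable names:

```python
import numpy as np

def clipping_mask(x, W=-50.0, b=0.75, rng=np.random.default_rng(0)):
    """Sample the missing mask of equation (11): s_ij = 1 means observed.
    With W = -50, pixels above the clipping point b = 0.75 are almost
    surely missing."""
    logits = W * (x - b)
    p_observed = 1.0 / (1.0 + np.exp(-logits))
    return rng.uniform(size=x.shape) < p_observed
```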
Table 2 shows model performance in terms of imputation RMSE and test-set log-likelihood, as estimated with 10k importance samples. The not-MIWAE outperforms the MIWAE both in terms of test-set log-likelihood and imputation RMSE. This is further illustrated by the imputations shown in figure 3. Since the MIWAE only fits the observed data, the range of pixel values in its imputations is limited compared to the true range. The not-MIWAE is forced to push some of the data distribution towards higher pixel values in order to achieve a higher likelihood in the logistic regression of the missing model. In figures 2a-2c, histograms over the imputed values are shown together with the true pixel values of the missing data. Here we see that the not-MIWAE puts a considerable amount of probability mass above the clipping value.
# 4.4 SELECTION BIAS IN THE YAHOO! R3 DATASET
The Yahoo! R3 dataset (webscope.sandbox.yahoo.com) contains ratings on a scale from 1-5 of songs in the database of the Yahoo! LaunchCast internet radio service and was first presented in (Marlin et al., 2007). It consists of two datasets with the same 1,000 songs selected randomly from the LaunchCast database. The first dataset is considered an MNAR training set and contains self-selected ratings from 15,400 users. In the second dataset, considered an MCAR test set, 5,400 of these users were asked to rate exactly 10 randomly selected songs. This gives a unique opportunity to train a model on a real-world MNAR-affected dataset while obtaining an unbiased estimate of the imputation error, thanks to the availability of MCAR ratings. The plausibility that the set of self-selected ratings was subject to an MNAR missing process was explored and substantiated by Marlin et al. (2007). The marginal distributions of samples from the self-selected dataset and the randomly selected dataset can be seen in figures 4a and 4b.

Figure 4: Histograms over rating values for the Yahoo! R3 dataset from (a) the MNAR training set and (b) the MCAR test set. (c) and (d) show histograms over imputations of missing values in the test set, when encoding the corresponding training set. The not-MIWAE imputations (d) are much more faithful to the shape of the test set (b) than the MIWAE imputations (c).
We train the MIWAE and the not-MIWAE on the MNAR ratings and evaluate the imputation error on the MCAR ratings. Both a Gaussian and a categorical observation model are explored. In order to get reparameterized samples in the data space for the categorical observation model, we use the Gumbel-Softmax trick (Jang et al., 2017) with a temperature of 0.5. The missing model is a logistic regression for each item/feature, with a shared weight across features and individual biases. A description of competitors can be found in appendix A.3 and follows the setup in (Wang et al., 2019). The results in table 3 are grouped, from top to bottom, into models not including the missing process (MAR approaches), models using propensity-scoring techniques to debias training losses, and finally models learning a data model and a missing model jointly, without the use of propensity estimates.
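For illustration, such reparameterized samples can be drawn with TensorFlow Probability's `RelaxedOneHotCategorical`, which implements the Gumbel-Softmax distribution of Jang et al. (2017); the batch size and the soft-rating readout below are our own choices:

```python
import tensorflow as tf
import tensorflow_probability as tfp

logits = tf.random.normal([100, 5])  # per-item logits over the 5 rating values
gumbel_softmax = tfp.distributions.RelaxedOneHotCategorical(
    temperature=0.5, logits=logits)
soft_one_hot = gumbel_softmax.sample()  # differentiable, approximately one-hot
ratings = tf.reduce_sum(soft_one_hot * tf.range(1., 6.), axis=-1)  # soft rating in [1, 5]
```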
The not-MIWAE shows state-of-the-art performance, also compared to models based on propensity scores. The propensity-based techniques need access to a small sample of MCAR data, i.e. a part of the test set, to estimate the propensities using naive Bayes, though the propensities can also be estimated using logistic regression if covariates are available (Schnabel et al., 2016), or using a nuclear-norm-constrained matrix factorization of the missing mask itself (Ma & Chen, 2019). We stress that the not-MIWAE does not need access to similar unbiased data in order to learn the missing model. However, the missing model in the not-MIWAE can take available information into account; e.g., if propensities were available, we could fit a continuous mapping to them and use this as the missing model. Histograms over imputations for the missing data in the MCAR test set are shown for the MIWAE and not-MIWAE in figures 4c and 4d. The marginal distribution of the not-MIWAE imputations is seen to match that of the MCAR test set better than the marginal distribution of the MIWAE imputations.
| Model | MSE |
| --- | --- |
| MF | 1.891 |
| PMF | 1.709 |
| AutoRec | 1.438 |
| Gaussian-VAE | 1.381 |
| MIWAE categorical | 2.067 ± 0.004 |
| MIWAE Gaussian | 2.055 ± 0.001 |
| CPT-v | 1.115 |
| MF-IPS | 0.989 |
| MF-DR-JL | 0.966 |
| NFM-DR-JL | 0.957 |
| MF-MNAR | 2.199 |
| Logit-vd | 1.301 |
| not-MIWAE categorical | 1.293 ± 0.006 |
| not-MIWAE Gaussian | 0.939 ± 0.007 |
Table 3: Imputation MSEs on the Yahoo! MCAR test set. Models are trained on the MNAR training set.
# 5 CONCLUSION
The proposed not-MIWAE is versatile both in terms of defining missing mechanisms and in terms of application area. There is a trade-off between data-model complexity and missing-model complexity: with a parsimonious data model a very general missing process can be used, while with a flexible data model the missing model needs to be more informative. Specifically, any knowledge about the missing process should be incorporated in the missing model to improve model performance. Doing so using recent advances in equivariant/invariant neural networks is an interesting avenue for future research (see appendix C). Recent developments on the subject of recoverability/identifiability of MNAR models (Sadinle & Reiter, 2018; Mohan & Pearl, 2021; Nabi et al., 2020; Sportisse et al., 2020b) could also be leveraged to design provably identifiable not-MIWAE models.
Several extensions of the graphical models used here could be explored. For example, one could relax the conditional independence assumptions, in particular that of the mask given the data. This could, for example, be done by using an additional latent variable pointing directly to the mask. Combined with a discriminative classifier, the not-MIWAE model could also be used in supervised learning with input values missing not at random, following the techniques by Ipsen et al. (2020).
# ACKNOWLEDGMENTS
The Danish Innovation Foundation supported this work through the Danish Center for Big Data Analytics driven Innovation (DABAI). JF acknowledges funding from the Independent Research Fund Denmark (grant number 9131-00082B) and the Novo Nordisk Foundation (grant numbers NNF20OC0062606 and NNF20OC0065611).
# REFERENCES
Alberto Bietti and Julien Mairal. Invariance and stability of deep convolutional representations. In Advances in Neural Information Processing Systems, pp. 6210-6220, 2017.

Benjamin Bloem-Reddy and Yee Whye Teh. Probabilistic symmetries and invariant neural networks. Journal of Machine Learning Research, 21(90):1-61, 2020.

Yuri Burda, Roger Grosse, and Ruslan Salakhutdinov. Importance weighted autoencoders. In International Conference on Learning Representations, 2016.

Stef van Buuren and Karin Groothuis-Oudshoorn. mice: Multivariate imputation by chained equations in R. Journal of Statistical Software, pp. 1-68, 2010.

Taco S. Cohen, Mario Geiger, and Maurice Weiler. A general theory of equivariant CNNs on homogeneous spaces. In Advances in Neural Information Processing Systems, volume 32, 2019.

Mark Collier, Alfredo Nazabal, and Chris Williams. VAEs in the presence of missing data. In the First ICML Workshop on the Art of Learning with Missing Values (ARTEMISS), 2020.

Arthur P. Dempster, Nan M. Laird, and Donald B. Rubin. Maximum likelihood from incomplete data via the EM algorithm. Journal of the Royal Statistical Society: Series B (Methodological), 39(1):1-22, 1977.

Joshua V. Dillon, Ian Langmore, Dustin Tran, Eugene Brevdo, Srinivas Vasudevan, Dave Moore, Brian Patton, Alex Alemi, Matt Hoffman, and Rif A. Saurous. TensorFlow Distributions. arXiv preprint arXiv:1711.10604, 2017.

Justin Domke and Daniel Sheldon. Importance weighting and variational inference. In Advances in Neural Information Processing Systems, volume 31, 2018.

Marco Doretti, Sara Geneletti, and Elena Stanghellini. Missing data: a unified taxonomy guided by conditional independence. International Statistical Review, 86(2):189-204, 2018.

Dheeru Dua and Casey Graff. UCI machine learning repository, 2017. URL http://archive.ics.uci.edu/ml.

Michael Figurnov, Shakir Mohamed, and Andriy Mnih. Implicit reparameterization gradients. In Advances in Neural Information Processing Systems, pp. 439-450, 2018.

Andrew Gelman, John B. Carlin, Hal S. Stern, David B. Dunson, Aki Vehtari, and Donald B. Rubin. Bayesian data analysis. Chapman and Hall/CRC, 2013.

Zoubin Ghahramani and Michael I. Jordan. Supervised learning from incomplete data via an EM approach. In Advances in Neural Information Processing Systems, pp. 120-127, 1994.

Zoubin Ghahramani and Michael I. Jordan. Learning from incomplete data. Technical Report AIM-1509, CBCL-108, Massachusetts Institute of Technology, 1995.

Sahra Ghalebikesabi, Rob Cornish, Luke J. Kelly, and Chris Holmes. Deep generative pattern-set mixture models for nonignorable missingness. arXiv preprint arXiv:2103.03532, 2021.

Peter W. Glynn. Importance sampling for Monte Carlo estimation of quantiles. In Mathematical Methods in Stochastic Simulation and Experimental Design: Proceedings of the 2nd St. Petersburg Workshop on Simulation, pp. 180-185. Publishing House of St. Petersburg University, 1996.

Yu Gong, Hossein Hajimirsadeghi, Jiawei He, Megha Nawhal, Thibaut Durand, and Greg Mori. Variational selective autoencoder. In Proceedings of the 2nd Symposium on Advances in Approximate Bayesian Inference, volume 118 of Proceedings of Machine Learning Research, pp. 1-17. PMLR, 2020.

Xiangnan He and Tat-Seng Chua. Neural factorization machines for sparse predictive analytics. In Proceedings of the 40th International ACM SIGIR Conference on Research and Development in Information Retrieval, pp. 355-364, 2017.

José Miguel Hernández-Lobato, Neil Houlsby, and Zoubin Ghahramani. Probabilistic matrix factorization with non-random missing data. In International Conference on Machine Learning, pp. 1512-1520, 2014.

Alexander Ilin and Tapani Raiko. Practical approaches to principal component analysis in the presence of missing values. Journal of Machine Learning Research, 11(Jul):1957-2000, 2010.

Niels Bruun Ipsen, Pierre-Alexandre Mattei, and Jes Frellsen. How to deal with missing data in supervised deep learning? In the First ICML Workshop on the Art of Learning with Missing Values (ARTEMISS), 2020.

Oleg Ivanov, Michael Figurnov, and Dmitry Vetrov. Variational autoencoder with arbitrary conditioning. In International Conference on Learning Representations, 2019.

Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-softmax. In International Conference on Learning Representations, 2017.

Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2014.

Diederik P. Kingma and Max Welling. Auto-encoding variational Bayes. In International Conference on Learning Representations, 2013.

Yehuda Koren, Robert Bell, and Chris Volinsky. Matrix factorization techniques for recommender systems. Computer, 42(8):30-37, 2009.

Dawen Liang, Rahul G. Krishnan, Matthew D. Hoffman, and Tony Jebara. Variational autoencoders for collaborative filtering. In Proceedings of the 2018 World Wide Web Conference, pp. 689-698. International World Wide Web Conferences Steering Committee, 2018.

David K. Lim, Naim U. Rashid, Junier B. Oliva, and Joseph G. Ibrahim. Handling non-ignorably missing features in electronic health records data using importance-weighted autoencoders. arXiv preprint arXiv:2101.07357, 2021.

Roderick J. A. Little and Donald B. Rubin. Statistical analysis with missing data. John Wiley & Sons, 2002.

Chao Ma, Wenbo Gong, José Miguel Hernández-Lobato, Noam Koenigstein, Sebastian Nowozin, and Cheng Zhang. Partial VAE for hybrid recommender system. In NIPS Workshop on Bayesian Deep Learning, 2018.

Chao Ma, Sebastian Tschiatschek, Konstantina Palla, José Miguel Hernández-Lobato, Sebastian Nowozin, and Cheng Zhang. EDDI: Efficient dynamic discovery of high-value information with partial VAE. In International Conference on Machine Learning, pp. 4234-4243, 2019.

Wei Ma and George H. Chen. Missing not at random in matrix completion: The effectiveness of estimating missingness probabilities under a low nuclear norm assumption. In Advances in Neural Information Processing Systems, pp. 14871-14880, 2019.

Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. The concrete distribution: A continuous relaxation of discrete random variables. In International Conference on Learning Representations, 2017.

Benjamin M. Marlin and Richard S. Zemel. Collaborative prediction and ranking with non-random missing data. In Proceedings of the Third ACM Conference on Recommender Systems, pp. 5-12. ACM, 2009.

Benjamin M. Marlin, Richard S. Zemel, Sam Roweis, and Malcolm Slaney. Collaborative filtering and the missing at random assumption. In Proceedings of the Twenty-Third Conference on Uncertainty in Artificial Intelligence, pp. 267-275. AUAI Press, 2007.

Pierre-Alexandre Mattei and Jes Frellsen. Leveraging the exact likelihood of deep latent variable models. In Advances in Neural Information Processing Systems, volume 31, pp. 3855-3866, 2018a.

Pierre-Alexandre Mattei and Jes Frellsen. Refit your encoder when new data comes by. In 3rd NeurIPS Workshop on Bayesian Deep Learning, 2018b.

Pierre-Alexandre Mattei and Jes Frellsen. MIWAE: Deep generative modelling and imputation of incomplete data sets. In International Conference on Machine Learning, pp. 4413-4423, 2019.

Andriy Mnih and Russ R. Salakhutdinov. Probabilistic matrix factorization. In Advances in Neural Information Processing Systems, volume 20, pp. 1257-1264, 2008.

Shakir Mohamed, Mihaela Rosca, Michael Figurnov, and Andriy Mnih. Monte Carlo gradient estimation in machine learning. Journal of Machine Learning Research, 21(132):1-62, 2020.

Karthika Mohan and Judea Pearl. Graphical models for processing missing data. Journal of the American Statistical Association, in press, 2021.

Geert Molenberghs, Caroline Beunckens, Cristina Sotto, and Michael G. Kenward. Every missingness not at random model has a missingness at random counterpart with equal fit. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 70(2):371-388, 2008.

Razieh Nabi, Rohit Bhattacharya, and Ilya Shpitser. Full law identification in graphical models of missing data: Completeness results. In International Conference on Machine Learning, pp. 7153-7163, 2020.

Alfredo Nazabal, Pablo M. Olmos, Zoubin Ghahramani, and Isabel Valera. Handling incomplete heterogeneous data using VAEs. Pattern Recognition, 107:107501, 2020.

Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y. Ng. Reading digits in natural images with unsupervised feature learning. In NIPS 2011 Workshop on Deep Learning and Unsupervised Feature Learning, 2011.

Danilo Jimenez Rezende, Shakir Mohamed, and Daan Wierstra. Stochastic backpropagation and approximate inference in deep generative models. In International Conference on Machine Learning, pp. 1278-1286, 2014.

Christian Robert. The Bayesian choice: from decision-theoretic foundations to computational implementation. Springer Science & Business Media, 2007.

Sam T. Roweis. EM algorithms for PCA and SPCA. In Advances in Neural Information Processing Systems, pp. 626-632, 1998.

Donald B. Rubin. Inference and missing data. Biometrika, 63(3):581-592, 1976.

Donald B. Rubin. Formalizing subjective notions about the effect of nonrespondents in sample surveys. Journal of the American Statistical Association, 72(359):538-543, 1977.

Donald B. Rubin. Multiple imputation after 18+ years. Journal of the American Statistical Association, 91(434):473-489, 1996.

Maurizio Sadinle and Jerome P. Reiter. Sequential identification of nonignorable missing data mechanisms. Statistica Sinica, 28(4):1741-1759, 2018.

Tobias Schnabel, Adith Swaminathan, Ashudeep Singh, Navin Chandak, and Thorsten Joachims. Recommendations as treatments: Debiasing learning and evaluation. In International Conference on Machine Learning, pp. 1670-1679, 2016.

Shaun Seaman, John Galati, Dan Jackson, and John Carlin. What is meant by "missing at random"? Statistical Science, 28(2):257-268, 2013.

Suvash Sedhain, Aditya Krishna Menon, Scott Sanner, and Lexing Xie. AutoRec: Autoencoders meet collaborative filtering. In Proceedings of the 24th International Conference on World Wide Web, pp. 111-112, 2015.

Aude Sportisse, Claire Boyer, and Julie Josse. Imputation and low-rank estimation with missing not at random data. Statistics and Computing, 30(6):1629-1643, 2020a.

Aude Sportisse, Claire Boyer, and Julie Josse. Estimation and imputation in probabilistic principal component analysis with missing not at random data. In Advances in Neural Information Processing Systems, volume 33, pp. 7067-7077, 2020b.

Harald Steck. Evaluation of recommendations: rating-prediction and ranking. In Proceedings of the 7th ACM Conference on Recommender Systems, pp. 213-220. ACM, 2013.

Daniel J. Stekhoven and Peter Bühlmann. MissForest—non-parametric missing value imputation for mixed-type data. Bioinformatics, 28(1):112-118, 2012.

Niansheng Tang and Yuanyuan Ju. Statistical inference for nonignorable missing-data problems: a selective review. Statistical Theory and Related Fields, 2(2):105-133, 2018.

Michael E. Tipping and Christopher M. Bishop. Probabilistic principal component analysis. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 61(3):611-622, 1999.

Xiaojie Wang, Rui Zhang, Yu Sun, and Jianzhong Qi. Doubly robust joint learning for recommendation on data missing not at random. In International Conference on Machine Learning, pp. 6638-6647, 2019.

Samuel Wiqvist, Pierre-Alexandre Mattei, Umberto Picchini, and Jes Frellsen. Partially exchangeable networks and architectures for learning summary statistics in approximate Bayesian computation. In International Conference on Machine Learning, pp. 6798-6807, 2019.

Jinsung Yoon, James Jordon, and Mihaela van der Schaar. GAIN: Missing data imputation using generative adversarial nets. In Proceedings of the 35th International Conference on Machine Learning, pp. 5689-5698, 2018.

Manzil Zaheer, Satwik Kottur, Siamak Ravanbakhsh, Barnabas Poczos, Ruslan Salakhutdinov, and Alexander J. Smola. Deep sets. In Advances in Neural Information Processing Systems, volume 30, pp. 3391-3401, 2017.
| | Banknote | Concrete | Red | White | Yeast | Breast |
| --- | --- | --- | --- | --- | --- | --- |
| **not-MIWAE PPCA** | | | | | | |
| agnostic | 0.80 ± 0.03 | 0.75 ± 0.05 | 0.88 ± 0.01 | 0.83 ± 0.00 | 0.78 ± 0.02 | 0.96 ± 0.00 |
| self-masking | 0.92 ± 0.05 | 0.95 ± 0.00 | 0.96 ± 0.00 | 0.97 ± 0.00 | 0.99 ± 0.00 | 0.98 ± 0.00 |
| self-masking known | 0.98 ± 0.00 | 0.95 ± 0.00 | 0.96 ± 0.00 | 0.97 ± 0.00 | 1.00 ± 0.00 | 0.97 ± 0.00 |
| **not-MIWAE** | | | | | | |
| agnostic | 0.92 ± 0.01 | 0.54 ± 0.04 | 0.91 ± 0.00 | 0.88 ± 0.00 | 0.80 ± 0.00 | 0.93 ± 0.00 |
| self-masking | 0.99 ± 0.00 | 0.93 ± 0.02 | 0.95 ± 0.01 | 0.90 ± 0.02 | 0.71 ± 0.02 | 0.98 ± 0.00 |
| self-masking known | 0.99 ± 0.00 | 0.97 ± 0.00 | 0.97 ± 0.00 | 0.95 ± 0.00 | 0.78 ± 0.00 | 0.98 ± 0.00 |
Table A1: Mask prediction accuracies on UCI datasets using fully observed data.
# A IMPLEMENTATION DETAILS
In all experiments we used TensorFlow Probability (Dillon et al., 2017) and the Adam optimizer (Kingma & Ba, 2014) with a learning rate of 0.001. Gaussian distributions were used both as the variational distribution in latent space and as the observation model in data space. No regularization was used. Similar settings were used for the MIWAE and the not-MIWAE, except for the missing model, which is exclusive to the not-MIWAE.
Source code is available at: https://github.com/nbip/notMIWAE
# A.1 UCI
The encoder and decoder consist of two hidden layers with 128 units and tanh activation functions. In the PPCA-like models, the decoder is a linear mapping from latent space to data space, with a learnt variance shared across features. The size of the latent space is set to $p - 1$, where $p$ is the number of features; $K = 20$ importance samples were used during training, and a batch size of 16 was used for 100k iterations. Data are standardized before missingness is introduced. The imputation RMSE is estimated using 10k importance samples, and means and standard errors are computed over 5 runs.
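A sketch of the corresponding networks in Keras, assuming a hypothetical dataset with $p = 8$ features; this is a paraphrase of the stated setup, not the released code:

```python
import tensorflow as tf

p, d = 8, 7          # p features, latent size p - 1

encoder = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation="tanh"),
    tf.keras.layers.Dense(128, activation="tanh"),
    tf.keras.layers.Dense(2 * d),   # mean and log-variance of q(z | x_o)
])

decoder = tf.keras.Sequential([     # PPCA variant: a single linear Dense layer
    tf.keras.layers.Dense(128, activation="tanh"),
    tf.keras.layers.Dense(128, activation="tanh"),
    tf.keras.layers.Dense(2 * p),   # mean and log-variance of p(x | z)
])
```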
Since the imputation error in a real-world setting cannot be monitored during training, neither on a training nor a validation set, early stopping cannot be based on it. Both the MIWAE and not-MIWAE are therefore trained for a fixed number of iterations. In the low-rank joint model of Sportisse et al. (2020a), model selection needs to be done for the penalization parameter $\lambda$. In order to do this, we add 5% missing values (MCAR) to the concatenated matrix of data and mask and use the imputation error on this added missing data to select the optimal $\lambda$. The model is then trained on the original data using the optimal $\lambda$ to get the imputation error.
For evaluating the learnt missing model, we report mask classification accuracies when feeding fully observed data as input to the missing model; see table A1. As the missing model incorporates more prior information, the classification accuracy improves.
# A.2 SVHN
For the encoder and decoder a convolutional structure was used (see tables A2 and A3), together with ReLU activations and a latent space of dimension 20. $K = 5$ importance samples were used during training, and a batch size of 64 was used for 1M iterations. The variance in the observation model was lower bounded at $\sim 0.02$.
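One common way to implement such a lower bound is a softplus parameterization with an additive floor; the sketch below illustrates the idea and is not necessarily the exact parameterization used in the released code:

```python
import tensorflow as tf

def observation_variance(raw, min_var=0.02):
    # Map an unconstrained network output to a variance bounded below by ~0.02.
    return tf.nn.softplus(raw) + min_var
```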
# A.3 YAHOO!
The MIWAE and the not-MIWAE were trained on the MNAR ratings, and the imputation error was evaluated on the MCAR ratings (when encoding the MNAR ratings). We used the permutation-invariant encoder by Ma et al. (2018) with an embedding size of 20 and a code size of 50, along with a linear mapping to a latent space of size 30. In the Gaussian observation model, the decoder is a linear mapping, and there is a sigmoid activation of the mean in data space, scaled to match the scale of the ratings. The categorical observation model also has a linear mapping to its logits. In both latent space and data space, we learn shared variance parameters in each dimension. The missing model is a logistic regression for each feature, with a shared weight across features and individual biases for each feature. We use $K = 20$ importance samples during training, ReLU activations, a batch size of 100, and train for 10k iterations.

Table A2: SVHN encoder

| layer (output size) |
| --- |
| Input x (32 × 32 × 1) |
| Conv2D (16 × 16 × 64) |
| Conv2D (8 × 8 × 128) |
| Conv2D (4 × 4 × 256) |
| Reshape (4096) |
| μ: Dense(20) |
| log σ: Dense(20) |

Table A3: SVHN decoder

| layer (output size) |
| --- |
| Latent variable z (20) |
| Dense (4096) |
| Reshape (4 × 4 × 256) |
| Conv2Dtranspose (8 × 8 × 256) |
| Conv2Dtranspose (16 × 16 × 128) |
| μ: Conv2Dtranspose (32 × 32 × 64), Conv2Dtranspose (32 × 32 × 1), sigmoid |
| log σ: Conv2Dtranspose (32 × 32 × 64), Conv2Dtranspose (32 × 32 × 1) |
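The missing model described in the paragraph above amounts to a single scalar weight plus a per-feature bias vector; a sketch with our own class name:

```python
import tensorflow as tf

class SharedWeightSelfMasking(tf.keras.layers.Layer):
    """Per-feature logistic regression with one weight shared across
    features and an individual bias per feature:
    logits_ij = w * x_ij + b_j, so Pr(s_ij = 1 | x_ij) = sigmoid(logits_ij)."""

    def __init__(self, n_features):
        super().__init__()
        self.w = tf.Variable(1.0)
        self.b = tf.Variable(tf.zeros([n_features]))

    def call(self, x):
        return self.w * x + self.b   # logits of the Bernoulli missing model
```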
We follow the setup of Wang et al. (2019) and compare to the following approaches:
CPT-v: Marlin et al. (2007) show that a multinomial mixture model with a conditional probability table (CPT) missing model gives better performance than the same mixture model without a missing model. The approach is further expanded by Marlin & Zemel (2009), where a logistic model, Logit-vd, is also tried as the missing model. The results for the CPT-v model and the Logit-vd model are taken from the supplementary material of Hernández-Lobato et al. (2014).
MF-MNAR: Hernández-Lobato et al. (2014) extended probabilistic matrix factorization to include a missing data model for data missing not at random in a collaborative filtering setting. Results are from the supplementary material of the paper.
MF-IPS: Schnabel et al. (2016) applied propensity-based methods from causal inference to matrix factorization, specifically inverse propensity scoring (IPS). The propensities used to debias the matrix factorization are the probabilities of a rating being observed for each (user, item) pair. The propensities used for training are estimated using 5% of the MCAR test set. Results are from the paper.
MF-DR-JL and NFM-DR-JL: Wang et al. (2019) combine the propensity-scoring approach from Schnabel et al. (2016) with an error-imputation approach by Steck (2013) to obtain a doubly robust estimator. This is used both with matrix factorization and with neural factorization machines (He & Chua, 2017). As for Schnabel et al. (2016), 5% of the MCAR test set is used to learn the propensities. Results are from the paper.
In addition to these debiasing approaches, we compare to the following methods, which do not take the missing process into account: MF (Koren et al., 2009), PMF (Mnih & Salakhutdinov, 2008), AutoRec (Sedhain et al., 2015) and Gaussian VAE (Liang et al., 2018). The presented results for these methods are from (Wang et al., 2019).
# B IMPUTATION
Once the model has been trained, it is possible to use it to impute the missing values. If our performance metric is a loss function $L(\pmb{x}^{\mathrm{m}}, \hat{\pmb{x}}^{\mathrm{m}})$, optimal imputations $\hat{\pmb{x}}^{\mathrm{m}}$ minimise $\mathbb{E}_{\pmb{x}^{\mathrm{m}}}[L(\pmb{x}^{\mathrm{m}}, \hat{\pmb{x}}^{\mathrm{m}}) \mid \pmb{x}^{\mathrm{o}}, \pmb{s}]$. Many loss functions can be minimized using moments of the conditional distribution of the missing values given the observed ones. Similarly to Mattei & Frellsen (2019, equations 10-11), these moments can be estimated via self-normalised importance sampling: for any function of the missing data $h(\pmb{x}^{\mathrm{m}})$,
$$
\mathbb{E}\left[h\left(\boldsymbol{x}^{\mathrm{m}}\right) \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] = \int h\left(\boldsymbol{x}^{\mathrm{m}}\right) p\left(\boldsymbol{x}^{\mathrm{m}} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right) \mathrm{d}\boldsymbol{x}^{\mathrm{m}}. \tag{12}
$$
Using Bayes's theorem, we get
$$
\mathbb{E}\left[h\left(\boldsymbol{x}^{\mathrm{m}}\right) \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] = \int h\left(\boldsymbol{x}^{\mathrm{m}}\right) \frac{p\left(\boldsymbol{s} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{x}^{\mathrm{m}}\right) p\left(\boldsymbol{x}^{\mathrm{m}}, \boldsymbol{x}^{\mathrm{o}}\right)}{p(\boldsymbol{s}, \boldsymbol{x}^{\mathrm{o}})} \mathrm{d}\boldsymbol{x}^{\mathrm{m}}, \tag{13}
$$
and now we can introduce the latent variable:
$$
\mathbb{E}\left[h\left(\boldsymbol{x}^{\mathrm{m}}\right) \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] = \iint h\left(\boldsymbol{x}^{\mathrm{m}}\right) \frac{p\left(\boldsymbol{s} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{x}^{\mathrm{m}}\right) p\left(\boldsymbol{x}^{\mathrm{m}} \mid \boldsymbol{z}\right) p\left(\boldsymbol{x}^{\mathrm{o}} \mid \boldsymbol{z}\right) p(\boldsymbol{z})}{p\left(\boldsymbol{s}, \boldsymbol{x}^{\mathrm{o}}\right)} \,\mathrm{d}\boldsymbol{z} \,\mathrm{d}\boldsymbol{x}^{\mathrm{m}}. \tag{14}
$$
Using self-normalised importance sampling on this last integral with proposal $q_{\gamma}(z|\pmb{x}^{\mathrm{o}})p_{\theta}(\pmb{x}^{\mathrm{m}}|\pmb{z})$ leads to the estimate
$$
\mathbb{E}\left[h(\boldsymbol{x}^{\mathrm{m}}) \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] \approx \sum_{k=1}^{K} \alpha_{k}\, h\left(\boldsymbol{x}_{k}^{\mathrm{m}}\right), \quad \text{with } \alpha_{k} = \frac{w_{k}}{w_{1} + \cdots + w_{K}}, \tag{15}
$$
where the weights $w_{1},\ldots ,w_{K}$ are incidentally identical to the ones used for training:
$$
\forall k \leq K, \quad w_{k} = \frac{p_{\phi}(\boldsymbol{s} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{x}_{k}^{\mathrm{m}}) \, p_{\theta}\left(\boldsymbol{x}^{\mathrm{o}} \mid \boldsymbol{z}_{k}\right) p\left(\boldsymbol{z}_{k}\right)}{q_{\gamma}\left(\boldsymbol{z}_{k} \mid \boldsymbol{x}^{\mathrm{o}}\right)}, \tag{16}
$$
and $(\pmb{z}_1, \pmb{x}_1^{\mathrm{m}}), \dots, (\pmb{z}_K, \pmb{x}_K^{\mathrm{m}})$ are $K$ i.i.d. samples from $q_{\gamma}(\pmb{z}|\pmb{x}^{\mathrm{o}}) p_{\theta}(\pmb{x}^{\mathrm{m}}|\pmb{z})$. If the quantity $\mathbb{E}[h(\pmb{x}^{\mathrm{m}})|\pmb{z}]$ is easy to compute, then a Rao-Blackwellized version of equation (15) should be preferred:
$$
\mathbb{E}\left[h(\boldsymbol{x}^{\mathrm{m}}) \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] \approx \sum_{k=1}^{K} \alpha_{k}\, \mathbb{E}\left[h(\boldsymbol{x}^{\mathrm{m}}) \mid \boldsymbol{z}_{k}\right]. \tag{17}
$$
Squared loss. When $L$ corresponds to the squared error, the optimal imputation is the conditional mean, which can be estimated using the method above (in that case, $h$ is the identity function):
$$
\hat{\boldsymbol{x}}^{\mathrm{m}} = \mathbb{E}\left[\boldsymbol{x}^{\mathrm{m}} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] \approx \sum_{k=1}^{K} \alpha_{k}\, \mathbb{E}\left[\boldsymbol{x}^{\mathrm{m}} \mid \boldsymbol{z}_{k}\right], \quad \text{with } \alpha_{k} = \frac{w_{k}}{w_{1} + \cdots + w_{K}}. \tag{18}
$$
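In code, the self-normalised estimate reduces to a stabilised softmax over the log-weights of equation (16); a NumPy sketch with our own function names:

```python
import numpy as np

def conditional_mean_impute(log_w, x_m_samples):
    """Estimate E[x_m | x_o, s] as in equation (18).

    log_w:        (K,) unnormalised log-weights, the log of equation (16)
    x_m_samples:  (K, n_missing) draws (or conditional means) of x_m
    """
    log_w = log_w - log_w.max()                  # stabilise before exponentiating
    alpha = np.exp(log_w) / np.exp(log_w).sum()  # normalised weights alpha_k
    return alpha @ x_m_samples
```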
Absolute loss. When $L$ is the absolute error loss, the optimal imputation is the conditional median, which can be estimated using the same technique at little additional cost compared to the mean. Indeed, we can estimate the cumulative distribution function of each missing feature $j \in \{1, \dots, p\}$:
$$
F_{j}\left(x_{j}\right) = \mathbb{E}\left[\mathbf{1}_{x_{j}^{\mathrm{m}} \leq x_{j}} \mid \boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s}\right] \approx \sum_{k=1}^{K} \alpha_{k}\, F_{x_{j} \mid \boldsymbol{z}_{k}}\left(x_{j}\right), \tag{19}
$$
where $F_{x_j|\boldsymbol{z}_k}$ is the cumulative distribution function of $x_{j}$ given $\boldsymbol{z}_k$ under the observation model, which will often be available in closed form (e.g. in the case of a Gaussian, Bernoulli or Student's $t$ observation model). We can then use this estimate to approximately solve $F_{j}(x_{j}) = 0.5$. More generally, if $L$ is a multilinear loss, optimal imputations will be quantiles (see e.g. Robert, 2007, section 2.5.2) that can be estimated using equation (19). The consistency of similar quantile estimates was studied by Glynn (1996).
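For a Gaussian observation model, the mixture CDF of equation (19) can be inverted with a standard root finder; a sketch under that assumption, with our own function names:

```python
import numpy as np
from scipy.optimize import brentq
from scipy.stats import norm

def conditional_median(alpha, mu, sigma):
    """Solve F_j(x_j) = 0.5 for one missing feature, where F_j is the
    mixture of Gaussian CDFs weighted by alpha from equation (19)."""
    F = lambda x: float(np.sum(alpha * norm.cdf(x, loc=mu, scale=sigma))) - 0.5
    lo = float(mu.min() - 10 * sigma.max())   # bracket where F(lo) < 0 < F(hi)
    hi = float(mu.max() + 10 * sigma.max())
    return brentq(F, lo, hi)
```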
Multiple imputation. It is also possible to perform multiple imputation with the same computations: one can obtain approximate samples from $p(\boldsymbol{x}^{\mathrm{m}}|\boldsymbol{x}^{\mathrm{o}}, \boldsymbol{s})$ using sampling importance resampling with the same set of weights. Single and multiple imputation thus come at the same computational cost.
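A sketch of the resampling step, again with our own function names:

```python
import numpy as np

def multiple_impute(alpha, x_m_samples, n_draws, rng=np.random.default_rng(0)):
    """Sampling importance resampling: approximate draws from
    p(x_m | x_o, s) by resampling the K proposals with weights alpha."""
    idx = rng.choice(len(alpha), size=n_draws, p=alpha)
    return x_m_samples[idx]
```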
# C MISSING MODEL, GROUP THEORETIC APPROACH
A more complex form of prior information that can be used to choose the form of $\pi_{\phi}(\pmb{x})$ is group-theoretic. For example, we may know a priori that $p_{\phi}(s|\pmb{x})$ is invariant to a certain group action $g\cdot \pmb{x}$ on the data space:
$$
\forall g, \quad p_{\phi}(\boldsymbol{s} \mid \boldsymbol{x}) = p_{\phi}(\boldsymbol{s} \mid g \cdot \boldsymbol{x}). \tag{20}
$$
This would, for example, be the case if the datasets consisted of images whose class is invariant to translations (which is the case for most image datasets, like MNIST or SVHN), with a missing model that depends only on the class. Similarly, one may know that the missing process is equivariant:
$$
\forall g, \quad p_{\phi}(g \cdot \boldsymbol{s} \mid \boldsymbol{x}) = p_{\phi}(\boldsymbol{s} \mid g^{-1} \cdot \boldsymbol{x}). \tag{21}
$$
Again, such a setting can appear when there is strong geometric structure in the data (e.g. with images or proteins). Invariance or equivariance can be built into the architecture of $\pi_{\phi}(\pmb{x})$ by leveraging the quite large body of work on invariant/equivariant convolutional neural networks, see e.g. Bietti & Mairal (2017); Cohen et al. (2019); Zaheer et al. (2017); Wiqvist et al. (2019); Bloem-Reddy & Teh (2020), and references therein.
# D THEORETICAL PROPERTIES OF THE NOT-MIWAE BOUND
The properties of the not-MIWAE bound are directly inherited from those of the usual IWAE bound. Indeed, as we will see, the not-MIWAE bound is a particular instance of the IWAE bound with an extended latent space composed of both the code and the missing values. More specifically, recall the definition of the not-MIWAE bound:
$$
\mathcal{L}_{K}(\theta, \phi, \gamma) = \sum_{i=1}^{n} \mathbb{E}\left[\log \frac{1}{K} \sum_{k=1}^{K} w_{ki}\right], \quad \text{with } w_{ki} = \frac{p_{\theta}\left(\boldsymbol{x}_{i}^{\mathrm{o}} \mid \boldsymbol{z}_{ki}\right) p_{\phi}\left(\boldsymbol{s}_{i} \mid \boldsymbol{x}_{i}^{\mathrm{o}}, \boldsymbol{x}_{ki}^{\mathrm{m}}\right) p\left(\boldsymbol{z}_{ki}\right)}{q_{\gamma}\left(\boldsymbol{z}_{ki} \mid \boldsymbol{x}_{i}^{\mathrm{o}}\right)}. \tag{22}
$$
Each term of the sum can be seen as an IWAE bound with extended latent variable $(\pmb{z}_{ki},\pmb{x}_{ki}^{\mathrm{m}})$, whose prior is $p_{\theta}(\pmb{x}_{ki}^{\mathrm{m}}|\pmb{z}_{ki})p(\pmb{z}_{ki})$. The importance sampling proposal of the $i$th term is $p_{\theta}(\pmb{x}_{ki}^{\mathrm{m}}|\pmb{z}_{ki})q_{\gamma}(\pmb{z}_{ki}|\pmb{x}_i^{\mathrm{o}})$, and the observation model is $p_{\phi}(\pmb{s}_i|\pmb{x}_i^{\mathrm{o}},\pmb{x}_{ki}^{\mathrm{m}})p_{\theta}(\pmb{x}_i^{\mathrm{o}}|\pmb{z}_{ki})$.
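Numerically, each term of equation (22) is a log-sum-exp over the $K$ log-weights; a sketch for a single datapoint, with our own function names:

```python
import numpy as np
from scipy.special import logsumexp

def not_miwae_term(log_p_xo_z, log_p_s_x, log_p_z, log_q_z):
    """log (1/K) sum_k w_ki from equation (22), computed in log-space.
    All arguments are length-K arrays of log-densities for one datapoint."""
    log_w = log_p_xo_z + log_p_s_x + log_p_z - log_q_z
    return logsumexp(log_w) - np.log(len(log_w))
```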
Since all $n$ terms of the sum are IWAE bounds, Theorem 1 from Burda et al. (2016) directly gives the monotonicity property:
$$
\mathcal{L}_{1}(\theta, \phi, \gamma) \leq \dots \leq \mathcal{L}_{K}(\theta, \phi, \gamma). \tag{23}
$$
Regarding convergence of the bound to the true likelihood, we can use Theorem 3 of Domke & Sheldon (2018) for each term of the sum to get the following result.
Theorem. Assuming that, for all $i \in \{1, \dots, n\}$ ,
- there exists $\alpha_{i} > 0$ such that $\mathbb{E}\left[\left|w_{1i} - p_{\theta,\phi}(\boldsymbol{x}_i^{\mathrm{o}}, \boldsymbol{s}_i)\right|^{2+\alpha_i}\right] < \infty$,
- $\limsup_{K \to \infty} \mathbb{E}\left[K / (w_{1i} + \dots + w_{Ki})\right] < \infty$,
the not-MIWAE bound converges to the true likelihood at rate $1/K$:
$$
\ell(\theta, \phi) - \mathcal{L}_{K}(\theta, \phi, \gamma) \underset{K \rightarrow \infty}{\sim} \frac{1}{K} \sum_{i=1}^{n} \frac{\operatorname{Var}\left[w_{1i}\right]}{2 \, p_{\theta, \phi}\left(\boldsymbol{x}_{i}^{\mathrm{o}}, \boldsymbol{s}_{i}\right)^{2}}. \tag{24}
$$
# E VARYING MISSING RATE (UCI)
The UCI experiments use a self-masking missing process in half of the features: a feature value is set to missing when it is higher than the feature mean. In order to investigate varying missing rates, we change the cutoff point from the mean to the mean plus an offset. The offsets used are $\{0, 0.25, 0.5, 0.75, 1.0\}$ (in units of standard deviations), so that the largest cutoff point is the mean plus one standard deviation. Increasing the cutoff point further results in mainly imputing outliers. Results for PPCA and not-MIWAE PPCA using the agnostic missing model are shown in figure 5, and using the self-masking model with known sign of the weights in figure 6. Figure 7 shows the results for the MIWAE and not-MIWAE using self-masking with known sign of the weights.
Figure 5: PPCA agnostic: Imputation RMSE at varying missing rates on the UCI datasets (a) Bank, (b) Concrete, (c) Red, (d) White, (e) Yeast and (f) Breast. The variation in missing rate is obtained by changing the cutoff point using an offset, so that an offset of 0 corresponds to using the mean as the cutoff point, while an offset of 1 corresponds to using the mean plus one standard deviation. Results are averages over 2 runs.
Figure 6: PPCA self-masking known: Imputation RMSE at varying missing rates on the UCI datasets (a) Bank, (b) Concrete, (c) Red, (d) White, (e) Yeast and (f) Breast. The variation in missing rate is obtained by changing the cutoff point using an offset, so that an offset of 0 corresponds to using the mean as the cutoff point, while an offset of 1 corresponds to using the mean plus one standard deviation. Results are averages over 2 runs.

|
| 496 |
+
(a) Bank
|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
(b) Concrete
|
| 500 |
+
|
| 501 |
+

|
| 502 |
+
(c) Red
|
| 503 |
+
|
| 504 |
+

|
| 505 |
+
(d) White
|
| 506 |
+
|
| 507 |
+

|
| 508 |
+
(e) Yeast
|
| 509 |
+
|
| 510 |
+

|
| 511 |
+
(f) Breast
|
| 512 |
+
Figure 7: Self-masking known: Imputation RMSE at varying missing rates on UCI datasets. The variation in missing rate is obtained by changing the cutoff point using an offset, so that an offset $= 0$ corresponds to using the mean as the cutoff point while an offset $= 1$ corresponds to using the mean plus one standard deviation as the cutoff point. Results are averages over 2 runs.
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e96d8823f3f9aede678b88b1590f1991322dad4b64d69754233da92a71ee8be
size 626149
notmiwaedeepgenerativemodellingwithmissingnotatrandomdata/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f443f8fa5c94ec5d393117e0a2ec5e8f91dcdff2bbc0bbd91e9e0917903373fe
size 609633
tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f461c03516e0a3998199a3122ef75dda4823160975bf9852ddf5afe944bf876
size 278970
tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c227225dd8647a2b222d6287bb8466a46ab480f94e3754c16d8fca2f3fc225a7
size 329228
tiltedempiricalriskminimization/a1a012d8-2343-44f4-9b78-b923307467ac_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0b9be40090a7fcc55fdf54e9aef43481acfa99fec4f063a6554a7f925ad9d8ee
size 1878293
tiltedempiricalriskminimization/full.md
ADDED
The diff for this file is too large to render. See raw diff.
tiltedempiricalriskminimization/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7a18d058594f6d8ffe88298347eaa3ca4849a9f36f48659b6f6873e8b3388d8c
size 1975003
tiltedempiricalriskminimization/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4851ac19168e91399ade94c9b70d5a18c27e7248a405c8ca0fd3cf1ea018ab2
size 1702547
tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3be4360878ccab5e914fdf0a2f9a0b33cf8e9e64a8e176378b16dc3b1a79f53e
size 135050
tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:debdbc93d8388e80760b1a88b405be6137c6d352c49a34c0005ba8cca0c825ac
size 164686
tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/27ed7f54-bd21-41d8-b443-e6a7645e0828_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7acd7986c615b6c6cd94b9bccb54fbfe8e4d8df5b143e087ecb705519812391b
size 3421667
tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/full.md
ADDED
@@ -0,0 +1,584 @@
# TOMOGRAPHIC AUTO-ENCODER: UNSUPERVISED BAYESIAN RECOVERY OF CORRUPTED DATA

# Francesco Tonolini

School of Computing Science, University of Glasgow, Glasgow, UK

2402432t@student.gla.ac.uk

# Pablo Garcia Moreno

Amazon, London, UK

morepabl@amazon.co.uk

# Andreas Damianou

Amazon, London, UK

damianou@amazon.co.uk

# Roderick Murray-Smith

School of Computing Science, University of Glasgow, Glasgow, UK

roderick.murray-smith@glasgow.ac.uk

# ABSTRACT
We propose a new probabilistic method for unsupervised recovery of corrupted data. Given a large ensemble of degraded samples, our method recovers accurate posteriors of clean values, allowing the exploration of the manifold of possible reconstructed data and hence characterising the underlying uncertainty. In this setting, direct application of classical variational methods often gives rise to collapsed densities that do not adequately explore the solution space. Instead, we derive our novel reduced entropy condition approximate inference method that results in rich posteriors. We test our model in a data recovery task under the common setting of missing values and noise, demonstrating superior performance to existing variational methods for imputation and de-noising with different real data sets. We further show higher classification accuracy after imputation, proving the advantage of propagating uncertainty to downstream tasks with our model.
# 1 INTRODUCTION
Data sets are rarely clean and ready to use when first collected. More often than not, they need to undergo some form of pre-processing before analysis, involving expert human supervision and manual adjustments (Zhou et al., 2017; Chu et al., 2016). Filling missing entries, correcting noisy samples, filtering collection artefacts and other similar tasks are some of the most costly and time consuming stages in the data modeling process and pose an enormous obstacle to machine learning at scale (Munson, 2012). Traditional data cleaning methods rely on some degree of supervision in the form of a clean dataset or some knowledge collected from domain experts. However, the exponential increase of the data collection and storage rates in recent years, makes any supervised algorithm impractical in the context of modern applications that consume millions or billions of datapoints. In this paper, we introduce a novel variational framework to perform automated data cleaning and recovery without any example of clean data or prior signal assumptions.
The Tomographic Auto-Encoder (TAE) is named in analogy with standard tomography. Tomographic techniques for signal recovery aim at reconstructing a target signal, such as a 3D image, by algorithmically combining different incomplete measurements, such as 2D images from different viewpoints, subsets of image pixels or other projections (Geyer et al., 2015). The TAE extends this concept to the reconstruction of data manifolds; our target signal is a clean data set, where corrupted data are interpreted as incomplete measurements. Our aim is to combine these to reconstruct the clean data.
More specifically, we are interested in performing Bayesian recovery, where we do not simply transform degraded samples into clean ones, but recover probabilistic functions, with which we can generate diverse clean signals and capture uncertainty. Uncertainty is considerably important when cleaning data. If we are over-confident about specific solutions, errors are easily ignored and passed on to downstream tasks. For instance, in the example of figure 1(a), some corrupted observations are consistent with multiple digits. If we were to impute a single possibility for each sample, the true underlying solution may be ignored early on in the modeling pipeline and the digit will be consistently mis-classified. If we are instead able to recover accurate probability densities, we can remain adequately uncertain in any subsequent processing task.

Figure 1: (a) Example of Bayesian recovery from corrupted data with a Tomographic Auto-Encoder (TAE) on corrupted MNIST. The TAE recovers posterior probability densities $q(x|y_i)$ for each corrupted sample $y_i$. We can draw from these to explore different possible clean solutions. (b) Two-dimensional Bayesian recovery experiment. (i) Observed set of corrupted data $Y$, with the point we are inferring from, $y_i$, highlighted. (ii) Ground-truth hidden clean data with the target point $x_i$ highlighted, along with the posterior $q(x|y_i)$ reconstructed by a VAE. (iii) Posterior $q(x|y_i)$ recovered with our TAE. While the VAE posterior collapses to a single point, the TAE reconstructs a rich posterior that adjusts to the data manifold.
Several variational auto-encoder (VAE) models have been proposed for applications that can be considered special cases of this problem (Im et al., 2017; Nazabal et al., 2018; Ainsworth et al., 2018) and, in principle, they are capable of performing Bayesian reconstruction. However, we show that surrogating variational inference (VI) in a latent space with VAEs results in collapsed distributions that do not explore the different possibilities of clean samples, but only return single estimates. The TAE performs approximate VI in the space of recovered data instead, through our reduced entropy condition method. The resulting posteriors adequately explore the manifold of possible clean samples for each corrupted observation and, therefore, adequately capture the uncertainty of the task.
In our experiments we focus on data recovery from noisy samples and missing entries. This is one of the most common data corruption settings, encountered in a wide range of domains and with different types of data (White et al., 2011; Kwak & Kim, 2017). By testing our approach in this prevalent scenario, we can closely compare with recently proposed VAE approaches (Nazabal et al., 2018; Dalca et al., 2019; Mattei & Frellsen, 2019). We show how the existing VAE models exhibit the posterior collapse problem while the TAE produces rich posteriors that capture the underlying uncertainty. We further test TAEs on classification subsequent to imputation, demonstrating superior performance to existing methods in these downstream tasks. Finally, we use a TAE to perform automated missing-value imputation on raw depth maps from the NYU rooms data set.
# 2 METHOD
In order to frame the problem and understand the issues with standard variational methods in this context, we view the task from a signal reconstruction perspective. The ultimate goal of a Bayesian data recovery method is to build and train a parametric probability density function (PDF) $q(x|y)$, which takes corrupted samples $y$ as inputs and generates different possible corresponding clean data $x \sim q(x|y)$ through sampling. There are two aspects we need to design: i) the structure of this conditional PDF and ii) the way it will be trained to perform the recovery task.
Regarding the former, as natural data often lies on highly non-linear manifolds, we need the conditional PDF to capture complicated modalities, e.g. the distribution of plausible images consistent with one of the corrupted observations in figure 1(a). A suitable recovery PDF $q(x|y)$ needs to be able to capture such complexity. A natural choice to achieve high capacity and tractability is to construct $q(x|y)$ as a conditional latent variable model (LVM). Conditional LVM neural networks have achieved efficient and expressive variational inference in many recovery settings, capturing complex solution spaces in high dimensional problems, such as image reconstruction (Nguyen et al., 2017; Mirza & Osindero, 2014; Adler & Oktem, 2018). The conditional LVM consists of a first conditional distribution $q(z|y)$ , mapping input corrupted data $y$ to latent variables $z$ , and a second conditional distribution $q(x|z,y)$ , mapping latent variables to output clean data $x$ . The resulting PDF is $q(x|y) = \int q(z|y)q(x|z,y)dz$ , where both $q(z|y)$ and $q(x|z,y)$ are simple distributions, such as isotropic Gaussians, whose moments are inferred by neural networks taking the respective conditional arguments as inputs. Figure 2(a) shows a graphical model for the conditional LVM.

Figure 2: Training LVMs for data recovery. (a) Structure of the reconstruction LVM used to infer approximate posteriors $q(x|y)$ of clean data $x$ from corrupted observations $y$ as conditional inputs. (b) Training of $q(x|y)$ using a VAE. A prior in the latent space $z$ is introduced as a regulariser, but no explicit regularisation is imposed on $x$ . (c) Training of $q(x|y)$ using our TAE model. An empirical prior $p(x) = \int p(z_p)p(x|z_p)dz_p$ is instead introduced in the clean data space $x$ .
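
To make the structure concrete, the following is a minimal sketch, assuming PyTorch, of how such a conditional LVM could be parameterised and sampled. The module sizes and names are illustrative assumptions, not the paper's exact architecture.

```python
# Minimal sketch of a conditional LVM posterior q(x|y) = ∫ q(z|y) q(x|z,y) dz,
# with both conditionals taken to be diagonal Gaussians parameterised by MLPs.
# All module names and sizes are illustrative, not the paper's architecture.
import torch
import torch.nn as nn

class ConditionalLVM(nn.Module):
    def __init__(self, dim_y, dim_x, dim_z, hidden=256):
        super().__init__()
        # q(z|y): corrupted observation -> latent Gaussian moments
        self.enc = nn.Sequential(nn.Linear(dim_y, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 2 * dim_z))
        # q(x|z,y): latent variables (and observation) -> clean data moments
        self.dec = nn.Sequential(nn.Linear(dim_z + dim_y, hidden), nn.ReLU(),
                                 nn.Linear(hidden, 2 * dim_x))

    def sample(self, y, n_samples=1):
        """Draw x ~ q(x|y) via the reparameterisation trick."""
        y = y.repeat(n_samples, 1)
        mu_z, log_var_z = self.enc(y).chunk(2, dim=-1)
        z = mu_z + torch.randn_like(mu_z) * (0.5 * log_var_z).exp()
        mu_x, log_var_x = self.dec(torch.cat([z, y], dim=-1)).chunk(2, dim=-1)
        x = mu_x + torch.randn_like(mu_x) * (0.5 * log_var_x).exp()
        return x

# e.g. 100 candidate clean digits for one corrupted MNIST vector:
# lvm = ConditionalLVM(dim_y=784, dim_x=784, dim_z=32)
# x_samples = lvm.sample(y.unsqueeze(0), n_samples=100)
```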
While the choice of structure is fairly straightforward, the main difficulty lies in training the recovery LVM in the absence of clean ground truths $x$ . In the supervised case, several established methods exist; the observed distributions of clean data $x$ conditioned on paired observations $y$ can be matched by parametric ones through a VAE or GAN training strategy (Sohn et al., 2015; Adler & Oktem, 2018; Tonolini et al., 2020). However, we are instead interested in the unsupervised situation, where we only have corrupted data $Y = \{y_{1:N}\}$ and a functional form for the corrupted data likelihood $p(y|x)$ , e.g., missing values and additive noise. Training a conditional LVM to fit posteriors without any ground truth examples $x$ is rather challenging, as we do not have data to encode from, in the case of VAE architectures, or adversarially compare with, in the case of GAN models.
# 2.1 VAES AND THE POSTERIOR COLLAPSE PROBLEM
Variational auto-encoders (VAEs) have been proposed for several problems within this definition of unsupervised reconstruction (Dalca et al., 2019; Im et al., 2017; Ainsworth et al., 2018). These methods lead to good single estimates of the underlying targets. However, they easily over-fit their posteriors, resulting in collapsed PDFs $q(x|y)$ . Put differently, they are often unable to explore different possible solutions to the recovery problem and return single estimates instead. Figure 1(b-ii) shows this pathology in a two-dimensional experiment.
The reason for this can be explained by considering what the reconstruction LVM $q(x|y)$ is and how it is trained when directly employing a VAE in the unsupervised recovery scenario. The VAE encodes latent vectors $z$ from corrupted observations $y$ with an encoder $q(z|y)$ and reconstructs clean data $x$ with a decoder $p(x|z)$ . These two functions constitute the reconstruction LVM $q(x|y) = \int q(z|y)p(x|z)dz$ . As we do not have clean ground truths $x$ , the data likelihood is maximised by mapping reconstructed clean samples $x$ back to corrupted samples $y$ with a corruption process likelihood $p(y|x)$ , e.g. zeroing out missing entries, to maximise reconstruction of the observations $y$ . Concurrently, regularisation in the latent space is induced with a user-defined prior $p(z)$ (e.g. a unit Gaussian). The resulting lower bound to be maximised during training can be expressed as follows:
$$
\mathcal{L}_{VAE} = \mathbb{E}_{q(z|y)} \log p(y|z) - KL(q(z|y)\,||\,p(z)), \tag{1}
$$
where the observations likelihood is $p(y|z) = \int p(x|z)p(y|x)dx$ , which in some cases, such as missing values and additive noise, is analytically tractable. A derivation is given in supplementary A.1.
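
As an illustration of this analytic case, the sketch below, assuming PyTorch and a fixed noise level, evaluates $\log p(y|z)$ for missing entries with additive Gaussian noise by integrating the decoder density against the noise and masking out unobserved coordinates. All names and shapes are assumptions for illustration.

```python
# Sketch of the analytic observation likelihood log p(y|z) for missing
# entries plus additive Gaussian noise: the decoder density p(x|z), with
# moments (mu_x, var_x), is integrated against the noise and evaluated only
# on observed coordinates. The fixed noise level is an assumed value.
import torch

def log_p_y_given_z(y, mask, mu_x, var_x, noise_var=0.01):
    """y, mask, mu_x, var_x: tensors of shape (batch, dim).
    mask is 1 where an entry of y is observed, 0 where missing."""
    var = var_x + noise_var  # Gaussian decoder convolved with Gaussian noise
    log_density = -0.5 * ((y - mu_x) ** 2 / var + var.log()
                          + torch.log(torch.tensor(2.0 * torch.pi)))
    return (mask * log_density).sum(dim=-1)  # unobserved entries drop out
```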
Viewing the VAE training from a signal reconstruction perspective, where our reconstruction model is $q(x|y) = \int q(z|y)p(x|z)dz$ , we can see that we are not introducing any prior directly on the hidden targets $x$ , but only in the LVM latent space $z$ . While regularising only in $z$ may be computationally desirable, if the decoder $p(x|z)$ is of sufficient capacity, the model can learn to collapse regularised distributions in $z$ to single estimates in $x$ , failing to capture uncertainty. In fact, this is induced by the objective function of equation 1; the model finds broad distributions in the latent space $q(z|y)$ , which minimise the KL divergence with $p(z)$ , but the generator $p(x|z)$ can learn to collapse them back to single maximum likelihood solutions in $x$ , maximising $\mathbb{E}_{q(z|y)}\log \int p(x|z)p(y|x)dx$ . This effect may be counteracted by reducing the capacity of $p(x|z)$ or the dimensionality of $z$ , but doing so also reduces the capacity of the reconstruction model $q(x|y)$ , resulting in an undesirable coupling between regularisation and posterior capacity.
# 2.2 SEPARATING POSTERIOR AND PRIOR: THE TOMOGRAPHIC AUTO-ENCODER
The premise of our model to address the aforementioned problem is simple: introduce a prior $p(x)$ in the hidden clean signal space. In particular, we propose to use an empirical prior, having itself the form of an LVM $p(x) = \int p(z_p)p(x|z_p)dz_p$ . In this way, we perform approximate variational inference in clean data space $x$ , instead of surrogating it to the reconstruction function's latent space $z$ . By doing so, we can control the capacity of the prior $p(x)$ to induce regularisation independently of the capacity of our reconstruction model $q(x|y) = \int q(z|y)q(x|z,y)dz$ . For this framework, we can formulate the following ELBO:
$$
\mathcal{L}_{TAE} = \mathbb{E}_{q(x|y)} \log p(y|x) + \mathbb{E}_{q(x|y)} \left[ \mathbb{E}_{q(z_{p}|x)} \log p(x|z_{p}) - KL(q(z_{p}|x)\,||\,p(z_{p})) \right] + H(q(x|y)).
$$
The above ELBO is derived in detail in supplementary section A.2. The main technical challenge, and the focus of this paper, is how to compute and maximise the entropy of the approximate posterior $H(q(x|y))$ , as this conditional distribution is an LVM of the form $q(x|y) = \int q(z|y)q(x|z,y)dz$ .
Reduced Entropy Condition: Direct computation of the entropy of an LVM model $q(x|y) = \int_{z}q(z|y)q(x|z,y)dz$ is intractable in the general case. Titsias & Ruiz (2019) proposed an approximate inference method to compute the gradient of the LVM's entropy for variational inference in latent spaces. However, this requires multiple samples to be drawn and evaluated with the LVM, which is expected to scale in complexity as the dimensionality and capacity of the target distribution increase.
In our case, we aim to approximately compute and optimise the entropy $H(q(x|y))$ for a distribution capturing natural data, which can be high-dimensional and lie on complicated manifolds. In order to maintain efficiency in the entropy estimation, we propose a new strategy: we identify a class of LVM posteriors for which the entropy reduces to a tractable form and then approximately constrain the posterior to such a class in our optimisation. Our main result is summarised in the following theorem:
Theorem 1 If $\frac{q(z|x,y)}{q(z|y)} = B\delta (z - g(x,y))$ , where $\delta (\cdot)$ is the Dirac Delta function, $B$ is a real positive parameter and $g(x,y)$ is a deterministic function, then $H(q(x|y)) = H(q(z|y)) + \mathbb{E}_{q(z|y)}H(q(x|z,y))$ .
We detail the proof in supplementary A.3. Theorem 1 states that if the posterior over latent variables $q(z|x,y)$ is infinitely more localised than the latent conditional $q(z|y)$ , then the LVM entropy $H(q(x|y))$ has the tractable form given above. This condition constrains the LVM posterior to have non-overlapping conditionals $q(x|z,y)$ for different latent variables $z$ , but does not impose any explicit restriction on the capacity of the model. We can also formulate the condition as follows:
$$
\mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} = C, \quad C \rightarrow \infty. \tag{2}
$$
The proof is provided in supplementary section A.4. To train our posterior $q(x|y)$ , we aim to maximise the ELBO $\mathcal{L}_{TAE}$ with the reduced entropy, while enforcing the condition of equation 2:
$$
\begin{array}{l}
\arg\max \; \mathbb{E}_{q(x|y)} \log p(y|x) + \mathbb{E}_{q(x|y)} \left[ \mathbb{E}_{q(z_{p}|x)} \log p(x|z_{p}) - KL(q(z_{p}|x)\,||\,p(z_{p})) \right] \\
+ H(q(z|y)) + \mathbb{E}_{q(z|y)} H(q(x|z,y)), \quad s.t. \quad \mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} = C, \quad C \rightarrow \infty. \tag{3}
\end{array}
$$
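
For diagonal Gaussian conditionals, both reduced entropy terms appearing above are available in closed form. The following is a minimal sketch, assuming PyTorch and a single reparameterised sample for the outer expectation over $q(z|y)$ ; names and shapes are assumptions.

```python
# Sketch of the reduced entropy of Theorem 1 for diagonal Gaussians:
# H(q(x|y)) = H(q(z|y)) + E_{q(z|y)} H(q(x|z,y)). For a diagonal Gaussian
# with log-variances lv, the entropy is 0.5 * sum(lv + log(2*pi*e)); the
# expectation over q(z|y) is estimated with one reparameterised sample.
import math
import torch

def gaussian_entropy(log_var):
    return 0.5 * (log_var + math.log(2.0 * math.pi * math.e)).sum(dim=-1)

def reduced_entropy(log_var_z, log_var_x):
    """log_var_z: (batch, dim_z) from q(z|y);
    log_var_x: (batch, dim_x) from q(x|z,y) at a sampled z."""
    return gaussian_entropy(log_var_z) + gaussian_entropy(log_var_x)
```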
While the ELBO is now amenable to stochastic optimisation, the constraint cannot be imposed directly, since $C \to \infty$ and the posterior $q(z|x,y)$ is itself intractable.
Relaxed Constraint: To render the constraint tractable, we first relax $C$ to be a positive hyper-parameter. The higher the value of $C$ , the more localised $q(z|x,y)$ is imposed to be compared to $q(z|y)$ , and the closer the reduced entropy is to the true one.
To address the intractability of the posterior $q(z|x,y)$ , we employ a variational approximation with a parametric function $r(z|x,y)$ . In fact, for any valid probability density $r(z|x,y)$ , we can prove that
$$
\mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} \geq \mathbb{E}_{q(x,z|y)} \log \frac{r(z|x,y)}{q(z|y)}. \tag{4}
$$
The proof is given in supplementary section A.5. The above bound implies the following:
$$
\mathbb{E}_{q(x,z|y)} \log \frac{r(z|x,y)}{q(z|y)} = C \;\Rightarrow\; \mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} \geq C.
$$
This means that imposing the condition with a parametric distribution $r(z|x,y)$ , which is trained along with the rest of the model, ensures deviation from the set condition only by excess. As the exact condition is met only at $\mathbb{E}_{q(x,z|y)}\log \frac{q(z|x,y)}{q(z|y)}\to \infty$ , we can never relax the constraint more than already set by the finite value of $C$ .
The TAE Objective Function: Having defined a tractable ELBO and a tractable condition, we need to perform the constrained optimisation
$$
\begin{array}{l}
\arg\max \; \mathbb{E}_{q(x|y)} \log p(y|x) + \mathbb{E}_{q(x|y)} \left[ \mathbb{E}_{q(z_{p}|x)} \log p(x|z_{p}) - KL(q(z_{p}|x)\,||\,p(z_{p})) \right] \\
+ H(q(z|y)) + \mathbb{E}_{q(z|y)} H(q(x|z,y)), \quad s.t. \quad \mathbb{E}_{q(x,z|y)} \log \frac{r(z|x,y)}{q(z|y)} = C. \tag{5}
\end{array}
$$
We use the commonly adopted penalty function method (Zangwill, 1967; Phuong et al., 2018) and relax equation 5 to an unconstrained optimisation with the use of a positive hyper-parameter $\lambda$ :
$$
\begin{array}{l}
\arg\max \; \mathbb{E}_{q(x|y)} \log p(y|x) + \mathbb{E}_{q(x|y)} \left[ \mathbb{E}_{q(z_{p}|x)} \log p(x|z_{p}) - KL(q(z_{p}|x)\,||\,p(z_{p})) \right] \\
+ H(q(z|y)) + \mathbb{E}_{q(z|y)} H(q(x|z,y)) - \lambda \left| \mathbb{E}_{q(z,x|y)} \log \frac{r(z|x,y)}{q(z|y)} - C \right|. \tag{6}
\end{array}
$$
To train the model, we maximise equation 6 using the Adam optimiser. Once the model is trained, we can generate diverse reconstructions from a corrupted observation $y_{i}$ by sampling from the posterior $q(x|y_i)$ . Details of our optimisation are reported in supplementary B.1. We describe how we handle parameters of the corruption process $p(y|x)$ in supplementary B.2.
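
As a minimal sketch of how the terms of equation 6 could be combined in practice, assuming PyTorch and single-sample Monte Carlo estimates of each expectation; the hyper-parameter values and all names are illustrative assumptions:

```python
# Sketch of the penalty-method objective of equation 6, assembled from
# per-batch Monte Carlo estimates of each term (single-sample, as in
# standard VAEs). lam and C are user-set hyper-parameters; all inputs are
# tensors computed elsewhere in the training step.
import torch

def tae_objective(log_p_y_given_x,   # E_q(x|y) log p(y|x)
                  prior_elbo,        # E log p(x|z_p) - KL(q(z_p|x) || p(z_p))
                  H_z, H_x,          # H(q(z|y)) and H(q(x|z,y)) at sampled z
                  log_r, log_q,      # log r(z|x,y) and log q(z|y) at samples
                  lam=10.0, C=50.0):
    entropy = H_z + H_x                  # reduced entropy of Theorem 1
    penalty = (log_r - log_q - C).abs()  # relaxed constraint of equation 5
    elbo = log_p_y_given_x + prior_elbo + entropy - lam * penalty
    return elbo.mean()  # maximise with Adam (i.e. minimise -elbo)
```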
# 3 RELATED WORK
# 3.1 SUPERVISED BAYESIAN RECONSTRUCTION
The reconstruction of posterior densities from incomplete measurements has been recently investigated in supervised situations, where examples of clean data are available. In particular, conditional generative models were demonstrated with high dimensional data (Parmar et al., 2018). These methods work by exploiting an LVM to generate diverse realisations of targets conditioned on associated observations (Isola et al., 2017; Nguyen et al., 2017). Both conditional generative adversarial networks (CGANs) (Mirza & Osindero, 2014; Isola et al., 2017) and conditional VAEs (CVAEs) (Sohn et al., 2015; Nguyen et al., 2017) have been studied in this context. In both cases, the samples generated by conditioning on an observation can be interpreted as samples from the corresponding conditional posterior densities.
These approaches proved successful in a range of recovery tasks: reconstruction of images with missing groups of pixels (Nguyen et al., 2017), super-resolution (Parmar et al., 2018), medical computed tomography reconstruction (Adler & Oktem, 2018) and semi-supervised situations, where examples of clean data and conditions are available in different amounts (Kingma et al., 2014; Denton et al., 2016; Tonolini et al., 2020). Other works reconstruct manifolds of solutions from observations only, but can be considered supervised, as they exploit pre-trained generators (Anirudh et al., 2018).
These works make the important observation that, when learning to recover data from corrupted or partial observations, there is not a single right solution, but many solutions of different likelihood. We aim to extend this ability to completely unsupervised scenarios, where no clean data examples are available.
# 3.2 UNSUPERVISED BAYESIAN RECONSTRUCTION
Reconstructing posteriors in the unsupervised case is largely still an open problem. However, several tasks that fall within this definition have been recently approached with Bayesian machine learning methods. Arguably the most investigated is de-noising. Several works solve this problem by exploiting the natural tendency of neural networks to regularise outputs (Lehtinen et al., 2018; Krull et al., 2019a;b). Other methods build LVMs that explicitly model the noise process in their decoder, retrieving clean samples upon encoding and generation (Im et al., 2017; Creswell & Bharath, 2018).
A second notable example is that of missing value imputation. Corrupted data corresponds to samples with missing entries. Recent works have explored the use of LVMs to perform imputation, both with GANs (Li et al., 2019; Yoon et al., 2018; Luo et al., 2018) and VAEs (Nazabal et al., 2018; Mattei & Frellsen, 2019; Ma et al., 2018). In the former, the discriminator of the GAN is trained to distinguish real values from imputed ones, such that the generator is induced to synthesise realistic imputations. In the latter, the encoder of a VAE maps incomplete samples to a latent space, to then generate complete samples. Successful unsupervised Bayesian missing value imputation has also been demonstrated with neural processes, where a global latent representation is learned to generate input-output models used to impute in each example (Garnelo et al., 2018).
Finally, Bayesian LVM methods have been used on other unsupervised tasks that can be cast as special cases of data recovery problems. Amongst these, we find multi-view generation (Shang et al., 2017; Ainsworth et al., 2018), where the target clean data includes all views for each sample, but the observed data only presents subsets. Blind source separation can also be cast as a recovery problem and has been approached with GANs and VAEs (Kameoka et al., 2018; Hoshen, 2019).
These models proved successful at reconstructing data in their specific domains. However, in our work, we show how exploiting a standard VAE inference structure, as several of the aforementioned methods do, often leads to posteriors of clean data that collapse onto single estimates, sacrificing the probabilistic capability of LVMs.
# 3.3 POSTERIOR COLLAPSE IN VARIATIONAL INFERENCE
The posterior collapse problem we approach with the reduced entropy condition method presented in this paper has some analogy with the latent posterior collapse encountered when using implicit distributions in variational inference to obtain flexible recognition models. The main issue in training these models successfully without collapse is the computation of density ratios between the latent prior and the implicit variational posterior. This problem is analogous to the difficulty in estimating the LVM entropy in our method. Yin & Zhou (2018) proposed to use a further lower bound on the ELBO and add a term encouraging diversity to avoid collapse. This term is obtained by drawing $K$ Gaussian components from the LVM posterior and computing the KL divergence of an individual component with the mixture distribution, i.e. the sum of the drawn Gaussians. Titsias & Ruiz (2019) build on this work by deriving an unbiased estimator for the ELBO gradient, instead of using a surrogate lower bound.
These methods rely on estimating the LVM posterior by sampling and aggregating $K$ explicit distribution components. This was demonstrated to work well for posteriors in an artificial latent space by drawing only a few components. However, in our data recovery setting, we need to capture the posteriors in clean data space, rather than a latent space. Posteriors capturing the uncertainty in natural data are expected to be much more complex and higher dimensional, so the number $K$ of drawn Gaussians needed to approximate the true LVM with these methods becomes rather large, making optimisation inefficient or even intractable in extreme cases. The reduced entropy condition method we derive in this paper avoids posterior collapse without having to estimate the LVM posterior through sampling and is therefore especially suited to the data recovery setting, where we are required to capture posteriors in clean data space.

Figure 3: MNIST data recovery from missing entries and noise. (a) Recovery examples using an MVAE and our TAE, showing the average reconstruction and samples from the trained posteriors. (b) Mean inference performance: PSNR between ground truths and mean reconstructions. (c) Probabilistic performance: ELBO assigned by the recovered posteriors to the ground truth data. The mean inference performance is very similar for the two models (PSNR values), while the probabilistic performance (ELBO values) is significantly higher for our TAE model. We can see evidence of this difference in the reconstruction examples. The MVAE and TAE return similarly adequate mean solutions, but the MVAE posterior's draws are all very similar, suggesting that the posterior has collapsed on a particular reconstruction. Contrarily, the posteriors returned by the TAE explore different possible solutions that are consistent with the associated corrupted observation.

Table 1: Bayesian recovery from noisy data with different percentages of missing entries. Table shows the ELBO assigned by the retrieved posteriors to the ground truth clean data. Our TAE model consistently returns higher ELBO values compared to the competing variational methods, as it is able to retrieve rich posteriors that adequately sample the solution space. More values in supp. D.3.
| | MNIST 50% | MNIST 80% | Fashion-MNIST 50% | Fashion-MNIST 80% | UCI HAR 50% | UCI HAR 80% |
| --- | --- | --- | --- | --- | --- | --- |
| MVAE | 870 ± 6 | 803 ± 15 | 757 ± 1 | 723 ± 7 | 585 ± 4 | 471 ± 10 |
| MIWAE | 917 ± 4 | 780 ± 6 | 800 ± 7 | 766 ± 8 | 613 ± 6 | 584 ± 8 |
| TAE | 1719 ± 7 | 1536 ± 14 | 1326 ± 7 | 1094 ± 13 | 1014 ± 6 | 854 ± 52 |
# 4 EXPERIMENTS
# 4.1 POSTERIOR RECOVERY
We corrupt the MNIST dataset (Deng, 2012) by introducing missing values and additive Gaussian noise on the observed entries. We then train both a missing value imputation VAE (MVAE), analogous to those presented in (Nazabal et al., 2018) and (Dalca et al., 2019), and our TAE model with the corrupted data set. The VAE and TAE are constructed such that the structures of their posteriors $q(x|y)$ , i.e. the functions mapping corrupted data to distributions of clean data at test time, are exactly the same. In this way, we can ensure that differences in performance are due to the variational inference method employed and not the choice of posterior model. The resulting variational posteriors are used to perform data recovery from the corrupted samples. Fig. 3(a) shows examples of mean reconstructions and posterior draws. See analogous experiments for grouped missing entries in suppl. D.2.
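
For concreteness, a minimal sketch of this corruption process, assuming PyTorch; the missing ratio and noise level shown are illustrative values, not the exact experimental settings:

```python
# Sketch of the corruption used in this experiment: entries are dropped
# uniformly at random and Gaussian noise is added to the survivors.
import torch

def corrupt(x, missing_ratio=0.5, noise_std=0.25):
    """x: (batch, dim) clean data in [0, 1]. Returns (y, mask)."""
    mask = (torch.rand_like(x) > missing_ratio).float()  # 1 = observed
    y = mask * (x + noise_std * torch.randn_like(x))     # missing -> zeroed
    return y, mask
```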
We evaluate the accuracy of mean reconstruction at different ratios of observed entries by measuring the peak signal to noise ratio (PSNR) between the ground truth data and mean recoveries (Figure 3(b)). To evaluate probabilistic performance, we approximately measure the likelihood assigned by the recovered posteriors to the ground truth data through a reconstruction ELBO, training a new inference function with the clean ground truths while leaving the posterior fixed, as is common for evaluating ELBOs in unsupervised settings (Cremer et al., 2018; Mattei & Frellsen, 2018; 2019). A detailed description of this approach is given in supplementary C.3. We also carry out analogous experiments testing de-noising and missing value imputation separately. These results are reported in supplementary D.4 and D.5. Results are shown in figure 3(c). We further evaluate our TAE with Fashion-MNIST, $28 \times 28$ grey-scale images of clothing (Xiao et al., 2017), and the UCI HAR dataset, which consists of filtered accelerometer signals from mobile phones worn by different people during common activities (Anguita et al., 2012). As before, we test the recovery of these data sets from a version affected by missing values and additive noise. In addition to the MVAE baseline, we compare against the recently proposed missing data importance-weighted auto-encoder (MIWAE) (Mattei & Frellsen, 2019), which optimises an importance weighted ELBO in place of the standard one. For each model and setting we compute the ELBO assigned to the ground truth data. Results are shown in Table 1. Experimental details in Sec. C of suppl. mat.

Figure 4: Propagating uncertainty to a classification task. Panels show corrupted data $y$ , draws from the MVAE posterior, draws from the TAE posterior and the resulting classification probabilities. Draws from the MVAE posterior are all very similar to each other. As a result, the imputed images are almost always classified in the same way and the uncertainty of the task is underestimated. The TAE posterior explores varied possible solutions to the recovery task. These can be recognised as different classes, resulting in less concentrated classification probabilities that better reflect the associated uncertainty.
# 4.2 DOWNSTREAM TASKS
To investigate the advantage of capturing complex uncertainties with our TAE model, we are interested in testing performance in downstream tasks. We test classification performance on subsets of the MNIST and Fashion-MNIST data sets, after recovery with our TAE. With both sets, we consider situations in which 10,000 examples are available, but corrupted with missing entries and noise. 1,000 of these are labelled with one of 10 possible classes and we wish to classify the remaining 9,000. To do so, we first train the TAE model on the full set, then use the recovered posteriors to generate multiple possible cleaned data for the labelled sub-set and use them to train a classifier.

Figure 5: Classification accuracy after imputation. Classifying using TAE imputations gives an advantage in this downstream task over using raw corrupted data or MVAE imputations, especially when the number of missing entries is high. This is because the MVAE collapses onto single imputations, while the TAE generates diverse samples for each corrupted observation. The TAE classifier is therefore trained with data augmentations consistent with the observed corrupted images, instead of single estimates.

To perform classification on the 9,000 remaining examples, we generate multiple possible cleaned data with the variational posteriors. Then, for each posterior sample, we perform classification and histogram the results. Examples are shown in figure 4. To evaluate performance, we take the class with the largest histogram count as the inferred one. We repeat this experiment for different ratios of missing values and over several repetitions, varying the subsets of labelled and unlabelled data. Classification accuracy results are shown in figure 5.
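
A minimal sketch of this voting procedure, assuming PyTorch; `lvm.sample` and `classifier` stand in for the trained posterior and classifier from the surrounding text, and all names are illustrative:

```python
# Sketch of classification through the recovered posterior: draw several
# candidate clean images per corrupted observation, classify each draw and
# histogram the predicted classes.
import torch

def classify_with_uncertainty(y, lvm, classifier, n_draws=100, n_classes=10):
    x_draws = lvm.sample(y.unsqueeze(0), n_samples=n_draws)  # (n_draws, dim)
    preds = classifier(x_draws).argmax(dim=-1)               # class per draw
    hist = torch.bincount(preds, minlength=n_classes).float() / n_draws
    return hist.argmax().item(), hist  # inferred class and class probabilities
```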
# 4.3 MISSING VALUES IN THE NYU DEPTH MAPS
As a final practical application, we use a convolutional version of our TAE to perform structured missing value imputation on depth maps of indoor rooms collected with a Kinect depth sensor. Missing entries are very common in depth maps recorded with such structured light sensors (Scharstein & Szeliski, 2003). We use raw depth data from the NYU rooms dataset, commonly used to test various computer vision systems (Silberman & Fergus, 2011; Silberman et al., 2012; Dollár & Zitnick, 2013; Chang et al., 2018). A large portion of the set is available only as raw data, which presents missing entries. These are especially concentrated around objects' edges and reflecting surfaces, breaking the common missing-at-random assumption and making this task particularly challenging. We train our TAE with a subset of this raw data set to perform imputation. Examples of results are shown in figure 6. Additional examples are shown in supplementary section D.6.
Figure 6: Unsupervised missing value imputation with our TAE on raw depth maps from the NYU rooms data set, compared with a median filter approach and the standard MVAE. Missing pixels in the observed images are in white. The median filter results in overly smoothed images and is unable to fill pixels that are surrounded by large missing areas. The MVAE returns adequate reconstructions, however, it over-fits to inaccurate solutions in certain locations, returning low uncertainty. The TAE returns good reconstructions and assigns high uncertainty to locations where reconstructions are most inaccurate, as shown by the marginal standard deviations.
# 5 CONCLUSION
We presented tomographic auto-encoders, a variational inference method for recovering posterior distributions of clean data from a corrupted data set alone. We derived the reduced entropy condition method, a novel inference strategy that yields rich distributions of clean data given corrupted observations, thereby capturing the uncertainty of the task, where standard variational methods often collapse onto single answers. In our experiments, we demonstrated this capability and showed the advantage of capturing uncertainty with the TAE in downstream tasks, outperforming state-of-the-art VAE based recovery methods.
# ACKNOWLEDGEMENTS
F.T. and R.M-S. acknowledge funding support from Amazon and EPSRC grants EP/M01326X/1, EP/T00097X/1 (QuantIC - UK Quantum Technology Hub in Quantum Enhanced Imaging). R.M-S. acknowledges funding support from EP/R018634/1 (Closed-Loop Data Science for Complex, Computationally- and Data-Intensive Analytics).
# REFERENCES
Jonas Adler and Ozan Öktem. Deep Bayesian inversion. arXiv preprint arXiv:1811.05910, 2018.

Samuel K. Ainsworth, Nicholas J. Foti, and Emily B. Fox. Disentangled VAE representations for multi-aspect and missing data. arXiv preprint arXiv:1806.09060, 2018.

D. Anguita, A. Ghio, L. Oneto, X. Parra, and J. L. Reyes-Ortiz. Human activity recognition on smartphones using a multiclass hardware-friendly support vector machine. In Proceedings of the International Workshop on Ambient Assisted Living, pp. 216–223. Springer, 2012.

Rushil Anirudh, Jayaraman J. Thiagarajan, Bhavya Kailkhura, and Timo Bremer. An unsupervised approach to solving inverse problems using generative adversarial networks. arXiv preprint arXiv:1805.07281, 2018.

Angel Chang, Angela Dai, Thomas Allen Funkhouser, Maciej Halber, Matthias Nießner, Manolis Savva, Shuran Song, Andy Zeng, and Yinda Zhang. Matterport3D: Learning from RGB-D data in indoor environments. In 7th IEEE International Conference on 3D Vision, 3DV 2017, pp. 667–676. Institute of Electrical and Electronics Engineers Inc., 2018.

Xu Chu, Ihab F. Ilyas, Sanjay Krishnan, and Jiannan Wang. Data cleaning: Overview and emerging challenges. In Proceedings of the 2016 International Conference on Management of Data, pp. 2201–2206. ACM, 2016.

Chris Cremer, Xuechen Li, and David Duvenaud. Inference suboptimality in variational autoencoders. In Proc. 35th Inter. Conference on Machine Learning, PMLR 80, 2018.

Antonia Creswell and Anil Anthony Bharath. Denoising adversarial autoencoders. IEEE Transactions on Neural Networks and Learning Systems, 30(4):968–984, 2018.

Adrian V. Dalca, John Guttag, and Mert R. Sabuncu. Unsupervised data imputation via variational inference of deep subspaces. arXiv preprint arXiv:1903.03503, 2019.

Li Deng. The MNIST database of handwritten digit images for machine learning research [best of the web]. IEEE Signal Processing Magazine, 29(6):141–142, 2012.

Emily Denton, Sam Gross, and Rob Fergus. Semi-supervised learning with context-conditional generative adversarial networks. arXiv preprint arXiv:1611.06430, 2016.

Piotr Dollár and C. Lawrence Zitnick. Structured forests for fast edge detection. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1841–1848, 2013.

Marta Garnelo, Dan Rosenbaum, Christopher Maddison, Tiago Ramalho, David Saxton, Murray Shanahan, Yee Whye Teh, Danilo Jimenez Rezende, and S. M. Ali Eslami. Conditional neural processes. In ICML, 2018.

Lucas L. Geyer, U. Joseph Schoepf, Felix G. Meinel, John W. Nance Jr., Gorka Bastarrika, Jonathon A. Leipsic, Narinder S. Paul, Marco Rengo, Andrea Laghi, and Carlo N. De Cecco. State of the art: iterative CT reconstruction techniques. Radiology, 276(2):339–357, 2015.

Yedid Hoshen. Towards unsupervised single-channel blind source separation using adversarial pair unmix-and-remix. In ICASSP 2019 – 2019 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 3272–3276. IEEE, 2019.

Daniel Im Jiwoong Im, Sungjin Ahn, Roland Memisevic, and Yoshua Bengio. Denoising criterion for variational auto-encoding framework. In Thirty-First AAAI Conference on Artificial Intelligence, 2017.

Phillip Isola, Jun-Yan Zhu, Tinghui Zhou, and Alexei A. Efros. Image-to-image translation with conditional adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1125–1134, 2017.

Hirokazu Kameoka, Li Li, Shota Inoue, and Shoji Makino. Semi-blind source separation with multichannel variational autoencoder. arXiv preprint arXiv:1808.00892, 2018.

Durk P. Kingma, Shakir Mohamed, Danilo Jimenez Rezende, and Max Welling. Semi-supervised learning with deep generative models. In Advances in Neural Information Processing Systems, pp. 3581–3589, 2014.

Alexander Krull, Tim-Oliver Buchholz, and Florian Jug. Noise2Void - learning denoising from single noisy images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2129–2137, 2019a.

Alexander Krull, Tomas Vicar, and Florian Jug. Probabilistic Noise2Void: unsupervised content-aware denoising. arXiv preprint arXiv:1906.00651, 2019b.

Sang Kyu Kwak and Jong Hae Kim. Statistical data preparation: management of missing values and outliers. Korean Journal of Anesthesiology, 70(4):407, 2017.

Jaakko Lehtinen, Jacob Munkberg, Jon Hasselgren, Samuli Laine, Tero Karras, Miika Aittala, and Timo Aila. Noise2Noise: Learning image restoration without clean data. arXiv preprint arXiv:1803.04189, 2018.

Steven Cheng-Xian Li, Bo Jiang, and Benjamin Marlin. MisGAN: Learning from incomplete data with generative adversarial networks. arXiv preprint arXiv:1902.09599, 2019.

Yonghong Luo, Xiangrui Cai, Ying Zhang, Jun Xu, et al. Multivariate time series imputation with generative adversarial networks. In Advances in Neural Information Processing Systems, pp. 1596–1607, 2018.

Chao Ma, Sebastian Tschiatschek, Konstantina Palla, José Miguel Hernández-Lobato, Sebastian Nowozin, and Cheng Zhang. EDDI: Efficient dynamic discovery of high-value information with partial VAE. arXiv preprint arXiv:1809.11142, 2018.

Pierre-Alexandre Mattei and Jes Frellsen. Leveraging the exact likelihood of deep latent variable models. In Advances in Neural Information Processing Systems, pp. 3855–3866, 2018.

Pierre-Alexandre Mattei and Jes Frellsen. MIWAE: deep generative modelling and imputation of incomplete data sets. In International Conference on Machine Learning, pp. 4413–4423, 2019.

Mehdi Mirza and Simon Osindero. Conditional generative adversarial nets. arXiv preprint arXiv:1411.1784, 2014.

M. Arthur Munson. A study on the importance of and time spent on different modeling steps. ACM SIGKDD Explorations Newsletter, 13(2):65–71, 2012.

Alfredo Nazabal, Pablo M. Olmos, Zoubin Ghahramani, and Isabel Valera. Handling incomplete heterogeneous data using VAEs. arXiv preprint arXiv:1807.03653, 2018.

Anh Nguyen, Jeff Clune, Yoshua Bengio, Alexey Dosovitskiy, and Jason Yosinski. Plug & play generative networks: Conditional iterative generation of images in latent space. In CVPR, volume 2, pp. 7, 2017.

Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Łukasz Kaiser, Noam Shazeer, Alexander Ku, and Dustin Tran. Image transformer. arXiv preprint arXiv:1802.05751, 2018.

Mary Phuong, Max Welling, Nate Kushman, Ryota Tomioka, and Sebastian Nowozin. The mutual autoencoder: Controlling information in latent code representations. In ICLR, 2018.

Daniel Scharstein and Richard Szeliski. High-accuracy stereo depth maps using structured light. In 2003 IEEE Computer Society Conference on Computer Vision and Pattern Recognition, volume 1, pp. I–I. IEEE, 2003.

Chao Shang, Aaron Palmer, Jiangwen Sun, Ko-Shin Chen, Jin Lu, and Jinbo Bi. VIGAN: Missing view imputation with generative adversarial networks. In 2017 IEEE International Conference on Big Data (Big Data), pp. 766–775. IEEE, 2017.

N. Silberman and R. Fergus. Indoor scene segmentation using a structured light sensor. In Proceedings of the International Conference on Computer Vision – Workshop on 3D Representation and Recognition, 2011.

Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from RGB-D images. In European Conference on Computer Vision, pp. 746–760. Springer, 2012.

Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. In Advances in Neural Information Processing Systems, pp. 3483–3491, 2015.

Michalis K. Titsias and Francisco Ruiz. Unbiased implicit variational inference. In The 22nd International Conference on Artificial Intelligence and Statistics, pp. 167–176, 2019.

Francesco Tonolini, Jack Radford, Alex Turpin, Daniele Faccio, and Roderick Murray-Smith. Variational inference for computational imaging inverse problems. Journal of Machine Learning Research, 21(179):1–46, 2020.

Ian R. White, Patrick Royston, and Angela M. Wood. Multiple imputation using chained equations: issues and guidance for practice. Statistics in Medicine, 30(4):377–399, 2011.

H. Xiao, K. Rasul, and R. Vollgraf. Fashion-MNIST: a novel image dataset for benchmarking machine learning algorithms. arXiv:1708.07747, 2017.

Mingzhang Yin and Mingyuan Zhou. Semi-implicit variational inference. In International Conference on Machine Learning, pp. 5660–5669, 2018.

Jinsung Yoon, James Jordon, and Mihaela van der Schaar. GAIN: Missing data imputation using generative adversarial nets. arXiv preprint arXiv:1806.02920, 2018.

Willard I. Zangwill. Non-linear programming via penalty functions. Management Science, 13(5):344–358, 1967.

Lina Zhou, Shimei Pan, Jianwu Wang, and Athanasios V. Vasilakos. Machine learning on big data: Opportunities and challenges. Neurocomputing, 237:350–361, 2017.
# Tomographic Auto-Encoder - Supplementary material
# A PROOFS AND DERIVATIONS
# A.1 DERIVATION OF VAE ELBO FOR DATA RECOVERY
We aim to maximise the log likelihood of the observed corrupted data $y$
$$
\log p(y) = \log \int_{x} \underbrace{\int_{z} p(z) p(x|z) \, dz}_{p(x)} \, p(y|x) \, dx. \tag{7}
$$
We can introduce a variational distribution in both clean data space and latent space $q(x,z|y)$ and define a lower bound as
$$
\begin{array}{l}
\log p(y) \geq \int_{x} \int_{z} q(x,z|y) \log \frac{p(z) p(x|z)}{q(x,z|y)} \, dz \, dx \tag{8} \\
+ \int_{x} \int_{z} q(x,z|y) \log p(y|x) \, dz \, dx.
\end{array}
$$
To obtain the VAE ELBO used in data recovery settings, the choice of the variational posterior is $q(x,z|y) = q(z|y)p(x|z)$ . The ELBO can then be simplified to give
$$
\begin{array}{l}
\log p(y) \geq \int_{x} \int_{z} q(z|y) p(x|z) \log \frac{p(z) p(x|z)}{q(z|y) p(x|z)} \, dz \, dx \\
+ \int_{x} \int_{z} q(z|y) p(x|z) \log p(y|x) \, dz \, dx \\
= \underbrace{\int_{x} p(x|z) \, dx}_{=1} \int_{z} q(z|y) \log \frac{p(z)}{q(z|y)} \, dz \tag{9} \\
+ \int_{x} \int_{z} q(z|y) p(x|z) \, dz \, \log p(y|x) \, dx
\end{array}
$$
For situations in which the observations' likelihood $\int_{x}p(x|z)p(y|x)dx$ has a closed form, such as additive noise and missing entries, we can define a tighter bound on the likelihood by moving the integral over $x$ in the second term inside the logarithm
$$
\begin{array}{l}
\log p(y) \geq \int_{z} q(z|y) \log \frac{p(z)}{q(z|y)} \, dz \\
+ \int_{z} q(z|y) \log \left[ \int_{x} p(y|x) p(x|z) \, dx \right] dz \tag{10} \\
= -KL(q(z|y)\,||\,p(z)) + \mathbb{E}_{q(z|y)} \log p(y|z).
\end{array}
$$
Because $p(x|z)$ cancels in the KL term, this ELBO avoids variational inference in the space of clean data $x$ .
# A.2 DERIVATION OF TAE ELBO
In our TAE model we defined separate LVMs for prior and posterior. To distinguish between the posterior latent variable and the prior latent variable, we name the former $z$ and the latter $z_{p}$ . The likelihood we aim to maximise is
$$
\log p(y) = \log \int_{x} \underbrace{\int_{z_p} p(z_p) p(x|z_p) \, dz_p}_{p(x)} \, p(y|x) \, dx. \tag{11}
$$
Similarly to the VAE ELBO case, we define a variational posterior $q(x,z_{p}|y)$ to find a lower bound
$$
\begin{array}{l}
\log p(y) \geq \int_{x} \int_{z_p} q(x, z_p|y) \log \frac{p(z_p) p(x|z_p)}{q(x, z_p|y)} \, dz_p \, dx \tag{12} \\
+ \int_{x} \int_{z_p} q(x, z_p|y) \log p(y|x) \, dz_p \, dx.
\end{array}
$$
However, in our model we do not assume that the variational posterior has the special form described in section A.1 and instead set it to have the form $q(x,z_{p}|y) = q(x|y)q(z_{p}|x)$ , separating the posterior inference from observations $y$ to clean data $x$ from the inference of prior latent variables $z_{p}$ . The resulting lower bound is
$$
\begin{array}{l}
\log p(y) \geq \int_{x} \int_{z_p} q(x|y) q(z_p|x) \log \frac{p(z_p) p(x|z_p)}{q(x|y) q(z_p|x)} \, dz_p \, dx + \int_{x} \int_{z_p} q(x|y) q(z_p|x) \log p(y|x) \, dz_p \, dx \\
= \int_{x} q(x|y) \underbrace{\int_{z_p} q(z_p|x) \log \frac{p(z_p) p(x|z_p)}{q(z_p|x)} \, dz_p}_{\geq \log p(x)} \, dx + \int_{x} \underbrace{\int_{z_p} q(z_p|x) \, dz_p}_{=1} \, q(x|y) \log p(y|x) \, dx \\
- \int_{x} \underbrace{\int_{z_p} q(z_p|x) \, dz_p}_{=1} \, q(x|y) \log q(x|y) \, dx \\
= \mathbb{E}_{q(x|y)} \left[ \mathbb{E}_{q(z_p|x)} \log p(x|z_p) - KL(q(z_p|x)\,||\,p(z_p)) \right] + \mathbb{E}_{q(x|y)} \log p(y|x) + H(q(x|y)). \tag{13}
\end{array}
$$
# A.3 PROOF OF THEOREM 1
$$
\begin{array}{l}
\frac{q(z|x,y)}{q(z|y)} = B \delta(z - g(x,y)) \Rightarrow \frac{q(z|x,y)}{q(z|y)} \frac{q(z'|x,y)}{q(z'|y)} = 0, \quad \forall x, z \neq z' \\
\Rightarrow \frac{q(x|z,y)}{q(x|y)} \frac{q(x|z',y)}{q(x|y)} = 0, \quad \forall x, z \neq z' \tag{14} \\
\Rightarrow q(x|z,y) \, q(x|z',y) = 0, \quad \forall x, z \neq z' \\
\Rightarrow q(x|z',y) = 0, \quad \forall x \sim q(x|z,y), \; z \neq z'
\end{array}
$$
Using the result of equation 14, we can derive the form of the entropy $H(q(x|y))$ for this special case as follows:
$$
\begin{array}{l}
H(q(x|y)) = - \int_{x} \left[ \int_{z} q(z|y) q(x|z,y) \, dz \right] \cdot \log \left[ \int_{z'} q(z'|y) q(x|z',y) \, dz' \right] dx \\
= - \int_{x} \int_{z} q(z|y) q(x|z,y) \cdot \log \Big[ \int_{z'=z} q(z'|y) q(x|z',y) \, dz' \\
+ \underbrace{\int_{z' \neq z} q(z'|y) q(x|z',y) \, dz'}_{\text{eq. 14} \, \Rightarrow \, =0} \Big] \, dz \, dx \tag{15} \\
= - \int_{z} \int_{x} q(z|y) q(x|z,y) \log \left[ q(z|y) q(x|z,y) \right] dx \, dz \\
= - \int_{z} \int_{x} q(z|y) q(x|z,y) \log q(z|y) \, dx \, dz - \int_{z} \int_{x} q(z|y) q(x|z,y) \log q(x|z,y) \, dx \, dz \\
= - \int_{z} q(z|y) \log q(z|y) \, dz - \int_{z} q(z|y) \int_{x} q(x|z,y) \log q(x|z,y) \, dx \, dz \\
= H(q(z|y)) + \mathbb{E}_{q(z|y)} H(q(x|z,y)).
\end{array}
$$
# A.4 PROOF OF THE EQUIVALENCE BETWEEN CONDITIONS
Proof of necessary condition:
$$
\begin{array}{l}
\mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} = \int_{z} q(z|y) \int_{x} q(x|z,y) \log \frac{q(z|x,y)}{q(z|y)} \, dx \, dz \\
= \int_{x} q(x|y) \int_{z} q(z|x,y) \log \frac{q(z|x,y)}{q(z|y)} \, dz \, dx \\
= \int_{x} q(x|y) \int_{z} q(z|x,y) \log q(z|x,y) \, dz \, dx - \int_{x} q(x|y) \int_{z} q(z|x,y) \log q(z|y) \, dz \, dx \\
= \int_{x} q(x|y) \int_{z} q(z|x,y) \log q(z|x,y) \, dz \, dx - \underbrace{\int_{x} q(x|z,y) \, dx}_{=1} \int_{z} q(z|y) \log q(z|y) \, dz \tag{16} \\
= \mathbb{E}_{q(x|y)} \underbrace{\int_{z} q(z|x,y) \log q(z|x,y) \, dz}_{-H(q(z|x,y))} - \underbrace{\int_{z} q(z|y) \log q(z|y) \, dz}_{-H(q(z|y))}.
\end{array}
$$
If the above expression tends to infinity, either $H(q(z|x,y)) \to -\infty$ or $H(q(z|y)) \to \infty$ , meaning that either $q(z|x,y)$ tends to a Delta function or $q(z|y)$ tends to a uniform distribution. Either condition implies $\frac{q(z|x,y)}{q(z|y)} = B\delta (z - g(x,y))$ .
Proof of sufficient condition:
$$
\begin{array}{l}
\mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} = \int_{x} q(x|y) \int_{z} q(z|x,y) \log \frac{q(z|x,y)}{q(z|y)} \, dz \, dx \tag{17} \\
= \int_{x} q(x|y) \int_{z} q(z|y) \frac{q(z|x,y)}{q(z|y)} \log \frac{q(z|x,y)}{q(z|y)} \, dz \, dx.
\end{array}
$$
Now we set $\frac{q(z|x,y)}{q(z|y)} = B\delta (z - g(x,y))$:
$$
\begin{array}{l}
\int_{x} q(x|y) \int_{z} q(z|y) B \delta(z - g(x,y)) \log B \delta(z - g(x,y)) \, dz \, dx \\
= \int_{x} q(x|y) \, q(g(x,y)|y) \log B \underbrace{\delta(g(x,y) - g(x,y))}_{\rightarrow \infty, \, \forall x} \, dx. \tag{18}
\end{array}
$$
Therefore, $\frac{q(z|x,y)}{q(z|y)} = B\delta (z - g(x,y))$ is a sufficient condition for $\mathbb{E}_{q(x,z|y)}\log \frac{q(z|x,y)}{q(z|y)}\to \infty$ .
# A.5 PROOF OF EQUATION 4
$$
\begin{array}{l}
\mathbb{E}_{q(x,z|y)} \log \frac{q(z|x,y)}{q(z|y)} = \int_{z} \int_{x} q(x,z|y) \log q(z|x,y) \, dz \, dx - \int_{z} \int_{x} q(x,z|y) \log q(z|y) \, dz \, dx \\
= \int_{x} q(x|y) \int_{z} q(z|x,y) \log q(z|x,y) \, dz \, dx - \int_{z} \int_{x} q(x,z|y) \log q(z|y) \, dz \, dx \tag{19} \\
\geq \int_{x} q(x|y) \int_{z} q(z|x,y) \log r(z|x,y) \, dz \, dx - \int_{z} \int_{x} q(x,z|y) \log q(z|y) \, dz \, dx \\
= \mathbb{E}_{q(x,z|y)} \log \frac{r(z|x,y)}{q(z|y)},
\end{array}
$$
where the inequality follows from the non-negativity of the KL divergence $KL(q(z|x,y)||r(z|x,y))$ .
# B ALGORITHM
# B.1 DETAILS OF TRAINING
As detailed in section 2.2, to train our variational posterior $q(x|y)$ , we maximise through gradient ascent the TAE ELBO with the reduced entropy penalty function
$$
\begin{array}{l}
\arg\max \; \mathbb{E}_{q(x|y)} \log p(y|x) + \mathbb{E}_{q(x|y)} \Big[ \underbrace{\mathbb{E}_{q(z_p|x)} \log p(x|z_p) - KL(q(z_p|x)\,||\,p(z_p))}_{\text{Prior ELBO} \; \geq \; \log p(x)} \Big] \tag{20} \\
+ H(q(z|y)) + \mathbb{E}_{q(z|y)} H(q(x|z,y)) - \lambda \left| \mathbb{E}_{q(z,x|y)} \log \frac{r(z|x,y)}{q(z|y)} - C \right|.
\end{array}
$$
All expectations in the above expression are computed and optimised by sampling the corresponding conditional distributions using the re-parametrisation trick characteristic of VAEs.
Because the prior LVM $p(x) = \int p(z_p)p(x|z_p)dz_p$ is trained entirely with samples from the posterior LVM, which is itself being trained, the model can easily obtain high values for the prior ELBO by generating collapsed samples $x$ with the posterior and get stuck in an unfavourable local minimum. To avoid this, we employ a warm-up strategy. We define a positive parameter $\gamma$ that multiplies the expectation of the prior ELBO and the entropy $H(q(x|z,y))$ :
$$
\begin{array}{l}
\arg\max \; \mathbb{E}_{q(x|y)} \log p(y|x) + \gamma \, \mathbb{E}_{q(x|y)} \Big[ \underbrace{\mathbb{E}_{q(z_p|x)} \log p(x|z_p) - KL(q(z_p|x)\,||\,p(z_p))}_{\text{Prior ELBO} \; \geq \; \log p(x)} \Big] \tag{21} \\
+ H(q(z|y)) + \gamma \, \mathbb{E}_{q(z|y)} H(q(x|z,y)) - \lambda \left| \mathbb{E}_{q(z,x|y)} \log \frac{r(z|x,y)}{q(z|y)} - C \right|.
\end{array}
$$
The value of $\gamma$ is initially set to zero. After a set number of iterations, it is linearly increased to one and then kept constant for the remaining training iterations.
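
A minimal sketch of this warm-up schedule as a plain Python function; the argument names mirror the quantities defined in Algorithm 1 below:

```python
# Linear warm-up of gamma between iterations n_w0 and n_wf, matching the
# schedule described above; gamma_0 and gamma_f are the endpoint values.
def warmup_gamma(k, n_w0, n_wf, gamma_0=0.0, gamma_f=1.0):
    if k <= n_w0:
        return gamma_0
    if k >= n_wf:
        return gamma_f
    return gamma_0 + (gamma_f - gamma_0) * (k - n_w0) / (n_wf - n_w0)
```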
# B.2 COMPLETE OBJECTIVE FUNCTION
Observation Parameters: In the general case, the corruption process $p(y|x)$ , mapping clean data $x$ to degraded samples $y$ , is controlled by parameters that differ from sample to sample. We can distinguish these into observed parameters $\alpha$ and unobserved parameters $\beta$ . For example, in the case of missing values and noise, the indexes of missing entries in each sample are often observed parameters, while the noise level is an unobserved parameter. The complete form of the corruption likelihood for a clean sample $x_{i}$ is then $p(y|x_i,\alpha_i,\beta_i)$ .
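
As an illustration, a minimal sketch, assuming PyTorch, of the corruption likelihood $p(y|x,\alpha,\beta)$ for this missing-values-plus-noise example, with $\alpha$ an observed binary mask and $\beta$ a per-sample noise variance; shapes and names are assumptions:

```python
# Sketch of a parameterised corruption likelihood p(y|x, alpha, beta):
# alpha masks the missing indexes (observed parameter) and beta is the
# per-sample Gaussian noise variance (unobserved parameter).
import torch

def log_p_y(y, x, alpha, beta):
    """y, x, alpha: (batch, dim); beta: (batch, 1) noise variance."""
    log_density = -0.5 * ((y - x) ** 2 / beta + beta.log()
                          + torch.log(torch.tensor(2.0 * torch.pi)))
    return (alpha * log_density).sum(dim=-1)
```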
Objective Function: With the corruption-parameter conditionals defined below and explicitly showing the parameters to be optimised, the objective function we maximise is the following
$$
\begin{array}{l}
\arg\max_{\theta, \phi} \quad \mathbb{E}_{q_{\phi}(x,\beta|y,\alpha)} \log p(y|x,\alpha,\beta) \\
+ \gamma \, \mathbb{E}_{q_{\phi}(x|y,\alpha)} \left[ \mathbb{E}_{q_{\phi_{3}}(z_p|x)} \log p_{\theta}(x|z_p) - KL(q_{\phi_{3}}(z_p|x)\,||\,p(z_p)) \right] \\
+ H(q_{\phi_{1}}(z|y,\alpha)) + \gamma \, \mathbb{E}_{q_{\phi_{1}}(z|y,\alpha)} H(q_{\phi_{2}}(x|z,y,\alpha)) \tag{22} \\
- \lambda \left| \mathbb{E}_{q_{\phi}(z,x|y,\alpha)} \log \frac{r_{\phi_{4}}(z|x)}{q_{\phi_{1}}(z|y,\alpha)} - C \right|,
\end{array}
$$

where $q_{\phi}(x,\beta|y,\alpha) = \int_{z} q_{\phi_{1}}(z|y,\alpha) \, q_{\phi_{2}}(x|z,y,\alpha) \, q_{\phi_{5}}(\beta|z,y,\alpha) \, dz$ , $q_{\phi}(x|y,\alpha) = \int_{z} q_{\phi_{1}}(z|y,\alpha) \, q_{\phi_{2}}(x|z,y,\alpha) \, dz$ , $q_{\phi}(z,x|y,\alpha) = q_{\phi_{1}}(z|y,\alpha) \, q_{\phi_{2}}(x|z,y,\alpha)$ , $\phi = \{\phi_{1:5}\}$ are the parameters of the inference models and $\theta$ are the parameters of the prior model.
# B.3 PSEUDO-CODE

The full training procedure is summarised in Algorithm 1 below.
# C EXPERIMENTAL DETAILS
# C.1 MODELS' ARCHITECTURES
In all experiments comparing our TAE with competing methods, we make the independence assumption $q(x|z,y) = q(x|z)$ , consequently making $r(z|x,y) = r(z|x)$ . In this way, the reconstruction posterior LVMs $q(x|y)$ we compare between the TAE, MVAE and MIWAE all present identical structure, and differences in performance are a result of the model constructed to train them alone. However, we note that, unlike the two competing methods, the TAE is not formally limited to this choice and can infer conditionals $q(x|z,y)$ in the general case. We hereafter detail the architecture used for all quantitative experiments of sections 4.1 and 4.2.
MVAE and MIWAE models: The MVAE model is built as an LVM having a unit Gaussian prior in the latent space $p(z) = \mathcal{N}(z;0,1)$ and an isotropic Gaussian clean data likelihood $p(x|z) = \mathcal{N}(x;\mu_x,\sigma_x^2)$ , where the moments $\mu_{x}$ and $\sigma_x^2$ are outputs of a neural network taking the latent variables $z$ as input. Because we only observe corrupted data $y$ , rather than clean data $x$ , the recognition model $q(z|y)$ is conditioned on observed corrupted data $y$ and also has the form of an isotropic Gaussian $q(z|y) = \mathcal{N}(z;\mu_z,\sigma_z^2)$ , where the moments $\mu_{z}$ and $\sigma_z^2$ are outputs of a neural network taking the corrupted observations $y$ as input. The corrupt data likelihood $p(y|z)$ is obtained by simply selecting the likelihood $p(x|z)$ over the observed indexes, i.e. for missing values corruption the integral $p(y|z) = \int p(x|z)p(y|x)dx$ simply masks out the unobserved entries. The model is then trained by maximising the ELBO of equation 1. The MIWAE is built with the same structure, but instead of optimising the MVAE ELBO of equation 1, an importance weighted lower bound is maximised, as described in (Mattei & Frellsen, 2019). The precise architectures used for the neural networks are described for the different experiments throughout the rest of this section. One important point to note is that, in each experiment, the structures of $p(x|z)$ and $q(z|y)$ are chosen such that the resulting reconstruction model after training, i.e. the model taking as input a test corrupt observation $y$ and generating clean samples $x$ , is identical for the TAE and the two competing models. That is, $\int q(z|y)p(x|z)dz$ for the MVAE and MIWAE has identical structure to $\int q(z|y)q(x|z)dz$ for the TAE. In this way, performance differences can be attributed solely to the difference in inference strategy and not the reconstruction model's capacity.
|
| 457 |
+
|
| 458 |
+
# Algorithm 1 Training the TAE Model
|
| 459 |
+
|
| 460 |
+
Inputs: Corrupted observations $Y = \{y_{1:N}\}$ ; Observed Parameters $A = \{\alpha_{1:N}\}$ initial model parameters, $\{\theta^{(0)},\phi^{(0)}\}$ ; user-defined posterior latent dimensionality, $J$ ; user-defined prior latent dimensionality, $J_p$ ; user-defined condition strength $\lambda$ ; user-defined condition parameter $C$ ; user-defined latent prior $p(z_p)$ ; user-defined initial warm-up coefficient $\gamma_0$ ; user-defined final warm-up coefficient $\gamma_f$ ; warm-up start $N_{w0}$ ; warm-up end $N_{wf}$ ; user-defined number of iterations, $N_{iter}$ .
|
| 461 |
+
|
| 462 |
+
1: $\gamma^{(k = 0)}\gets \gamma_0$
|
| 463 |
+
2: for the $k$ 'th iteration in $[0:N_{iter} - 1]$
|
| 464 |
+
3: for the $i$ th observation
|
| 465 |
+
4: $z_{i}\sim q_{\phi_{1}^{(k)}}(z|y_{i},\alpha_{i})$
|
| 466 |
+
5: $x_{i}\sim q_{\phi_{2}^{(k)}}(x|z_{i},y_{i},\alpha_{i})$
|
| 467 |
+
6: $\beta_{i}\sim q_{\phi_{\mathfrak{g}}^{(k)}}(\beta |z_{i},y_{i},\alpha_{i})$
|
| 468 |
+
7: $z_{p,i} \sim q_{\phi_3^{(k)}}(z_p|x_i)$
|
| 469 |
+
8: $\mathbf{E}_i^{(k)}\gets \log p(y_i|x_i,\beta_i)$
|
| 470 |
+
9: $\mathbf{P}_i^{(k)}\gets \log p_{\theta^{(k)}}(x_i|z_{p,i})$
|
| 471 |
+
0: $\mathbf{K}_i^{(k)}\gets D_{KL}(q_{\phi_3^{(k)}}(z_p|x_i)||p(z_p))$
|
| 472 |
+
11: $\mathbf{H}\mathbf{z}_i^{(k)}\gets H(q_{\phi_1^{(k)}}(z|y_i,\alpha_i))$
|
| 473 |
+
12: $\mathbf{H}\mathbf{x}_i^{(k)}\gets H(q_{\phi_2^{(k)}}(x|z_i,y_i,\alpha_i))$
|
| 474 |
+
13: $\mathbf{R}_i^{(k)}\gets \log r_{\phi_4^{(k)}}(z_i|x_i,y_i,\alpha_i)$
|
| 475 |
+
14: $\mathbf{Q}_i^{(k)}\gets \log q_{\phi_1^{(k)}}(z_i|y_i,\alpha_i)$
|
| 476 |
+
15: end
|
| 477 |
+
16: $\mathbf{F}^{(k)} = \sum_{i}\left(\mathbf{E}_{i}^{(k)} + \gamma^{(k)}\left[\mathbf{P}_{i}^{(k)} - \mathbf{K}_{i}^{(k)} + \mathbf{H}\mathbf{x}_{i}^{(k)}\right]\right.$
|
| 478 |
+
17: $+\mathbf{H}\mathbf{z}_i^{(k)} - \lambda \left|\mathbf{R}_i^{(k)} - \mathbf{Q}_i^{(k)} - C\right|$
|
| 479 |
+
18: $\theta^{(k + 1)},\phi^{(k + 1)}\gets \arg \max (\mathbf{F}^{(k)})$
|
| 480 |
+
19: if $k > N_{w0}$ and $k < N_{wf}$
|
| 481 |
+
20: $\gamma^{(k + 1)}\gets \gamma^{(k)} + (\gamma_f - \gamma_0) / (N_{wf} - N_{w0})$
|
| 482 |
+
21: else
|
| 483 |
+
22: $\gamma^{(k + 1)}\gets \gamma^{(k)}$
|
| 484 |
+
23: end
|
| 485 |
+
24: end
|
| 486 |
+
|
| 487 |
+
neural network having as input the corrupted observations $y$ . The corrupt data likelihood $p(y|z)$ is obtained by simply selecting the likelihood $p(x|z)$ over the observed indexes, i.e. for missing values corruption the integral $p(y|z) = \int p(x|z)p(y|x)dx$ simply masks out the unobserved entries. The model is then trained by maximising the ELBO of equation 1. The MIWAE is built with the same structure, but instead of optimising the MVAE ELBO of equation 1, an importance weighted lower bound is maximised, as described in (Mattei & Frellsen, 2019). The precise architectures used for the neural networks are described for the different experiments throughout the rest of this section. One important point to notice is that, in each experiments, the structures of $p(x|z)$ and $q(z|y)$ are chosen such that the resulting reconstruction model after training, i.e. the model taking as input a test corrupt observation $y$ and generating clean samples $x$ , is identical for the TAE and the two tested competing models. That is $\int q(z|y)p(x|z)dz$ for the MVAE and MIWAE have identical structure to $\int q(z|y)q(x|z)dz$ for the TAE. In this way, performance differences can be attributed solely to the difference in inference strategy and not reconstruction model's capacity.
|
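As an illustration of the masking just described, for an isotropic Gaussian likelihood the corrupt-data log-likelihood reduces to evaluating the Gaussian log-density on the observed entries only. A minimal sketch, with our own variable names:

```python
import torch

# log p(y|z) for missing-values corruption: the Gaussian log-density of
# p(x|z) summed over the observed entries only (mask = 1 where observed).
def masked_gaussian_loglik(y, mu_x, log_var_x, mask):
    dist = torch.distributions.Normal(mu_x, (0.5 * log_var_x).exp())
    return (dist.log_prob(y) * mask).sum(-1)
```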
Posteriors structure: The parametric components of the posterior are $q_{\phi_1}(z|y,\alpha)$ and $q_{\phi_2}(x|z)$ ($p_{\phi_2}(x|z)$ in the case of the MVAE and MIWAE). $q_{\phi_1}(z|y,\alpha)$ consists of a fully connected two-layer neural network with leaky ReLU non-linearities, taking as input the concatenation of the corrupted observations $y$ and a binary mask $\alpha$ that labels the missing entries, and returning as output a vector of latent means and a vector of latent log variances. The two intermediate deterministic layers have 400 hidden units, while the latent space $z$ is 20-dimensional.

$q_{\phi_2}(x|z)$, and $p_{\phi_2}(x|z)$ in the case of the MVAE and MIWAE, is similarly constructed, consisting of a fully connected two-layer neural network with leaky ReLU non-linearities, taking as input the latent variables $z$ and returning a vector of means and a vector of log variances of clean samples $x$. The two intermediate deterministic layers have 400 hidden units.
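A minimal PyTorch sketch of this two-layer Gaussian MLP template (the class name is ours; layer sizes follow the text):

```python
import torch
import torch.nn as nn

class GaussianMLP(nn.Module):
    """Two-layer leaky-ReLU network returning a diagonal Gaussian."""
    def __init__(self, in_dim, hidden=400, out_dim=20):
        super().__init__()
        self.body = nn.Sequential(
            nn.Linear(in_dim, hidden), nn.LeakyReLU(),
            nn.Linear(hidden, hidden), nn.LeakyReLU(),
        )
        self.mean = nn.Linear(hidden, out_dim)
        self.log_var = nn.Linear(hidden, out_dim)

    def forward(self, y, alpha=None):
        # For q(z|y, alpha): concatenate the observation with the binary mask.
        h = self.body(torch.cat([y, alpha], dim=-1) if alpha is not None else y)
        return torch.distributions.Normal(self.mean(h), (0.5 * self.log_var(h)).exp())
```

With `hidden=50` and `out_dim=5`, the same template gives the lower-capacity prior networks described next.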
TAE Prior LVM Structure: The TAE prior encoder $q_{\phi_3}(z_p|x)$ has the same general structure as the posterior encoder, with two fully connected layers and leaky ReLU non-linearities, taking as input generated clean data $x$ and returning as outputs a vector of latent means and a vector of latent log variances for the prior latent variable $z_p$. As this model has less capacity than the posterior LVM, the two deterministic hidden layers have 50 hidden units each and the latent variables $z_p$ are 5-dimensional.

$p_{\theta}(x|z_p)$ is similarly constructed, consisting of a fully connected two-layer neural network with leaky ReLU non-linearities, taking as input the latent variables $z_{p}$ and returning a vector of means and a vector of log variances of clean samples $x$. The two intermediate deterministic layers have 50 hidden units.

Approximate Latent Posterior Structure: The approximate latent posterior $r(z|x)$ has the same structure as the posterior encoder, consisting of a fully connected two-layer neural network with leaky ReLU non-linearities, taking as input generated clean data $x$ and returning as outputs a vector of latent means and a vector of latent log variances. The two intermediate deterministic layers have 400 hidden units.
Convolutional TAE Structure: For the imputation of NYU missing data we use convolutional conditionals in our model, instead of fully connected ones. In this version, we do not make the independence assumption, using $q(x|z,y)$ and $r(z|x,y)$. $q(z|y,\alpha)$ takes the concatenated $y$ and $\alpha$ and passes them through 4 recurrent convolutional layers with filters of size $3 \times 3$ and 5 channels, each time down-sampling by two. The last layer is mapped to the means and standard deviations of latent images $z$, which are $1/32$ of the original size along each axis and have 10 channels, through two convolutional filter banks with strides $1 \times 1$. $q(x|z,y,\alpha)$ is built to mirror this structure, with the addition of accepting inputs from $y$ and $\alpha$. Three recurrent transpose-convolutional layers with $3 \times 3$ filters, 5 channels and $2 \times 2$ up-sampling each map $z$ to a deterministic layer at $1/2 \times 1/2$ of the original image size. The concatenated $y$ and $\alpha$ are mapped to the same size, with 5 channels, by a single convolutional layer down-sampling by $1/2 \times 1/2$. The two are concatenated and the resulting layer is finally up-sampled to the inferred clean image $x$ by a last convolution with a filter bank. All non-linearities are ELUs.

The prior networks are built in a similar way, but with shallower structures to give them less capacity. $q(z_{p}|x)$ passes $x$ through 2 convolutional layers, each with $4 \times 4$ down-sampling and 5 channels. As before, means and standard deviations of the latent images $z_{p}$ are generated from this last layer with $2 \times 2$ down-sampling and, in this case, 5 channels. The prior generator $p(x|z_p)$ is built to exactly mirror this structure. $r(z|x,y,\alpha)$ has the same structure as $q(z|y,\alpha)$, with the only difference being that it accepts as input the concatenated $x$, $y$ and $\alpha$.
# C.2 EXPERIMENTAL CONDITIONS

Posterior Recovery: All posterior recovery experiments, with each of the three data sets tested, are performed on samples that have been re-scaled to lie between 0 and 1. In all cases, the sets are injected with additive Gaussian noise with standard deviation 0.1. Subsequently, random binary masks are generated to block out some entries, resulting in missing values. The proportion of missing entries in the masks was set as described in the main body in each case.
Experiments were repeated with re-generated binary masks 5 times. The means and error bars shown in figure 4 and the uncertainty reported in table 1 were computed from these. The MIWAE was trained with 20 weights per sample. After training, all posteriors $q(x|y)$ have identical structure and are tested in the same way, by training an inference network on the test set to compute the ELBO values.

Classification Experiments: The TAE models for the MNIST and Fashion-MNIST experiments were trained in the conditions described above. In each case, a random subset of 10,000 samples is taken from the corrupted set and the TAE and MVAE models are trained with it. A random subset of 1,000 of these is selected and ground-truth labels for these samples are made available.
A classifier consisting of a single fully connected layer with leaky ReLU non-linearity is trained to perform classification on this subset. For each stochastic training iteration of this classifier, we generate samples associated with the corrupted observations and provide the associated labels. After the classifier is trained, we test classification performance on the remaining 9,000 examples, by running the trained classifier 400 times per sample, each time generating clean data from a corrupted observation with the TAE and the MVAE. The histograms shown in figure 5 are built by aggregating the resulting classifications.

The above procedure is repeated 15 times. The resulting means and standard deviations of the tested classification performance are shown in figure 6.
Training Conditions: The optimisation hyper-parameters for the models were cross-validated on the MNIST data set with a proportion of missing entries of 0.9. Hyper-parameters common to all models were determined by obtaining the best performance with the MVAE model. Hyper-parameters specific to the TAE model were obtained by fixing the common parameters and cross-validating these. The resulting optimal hyper-parameters were then used in all other experiments of sections 4.1 and 4.2, including those with different data sets. The common parameters are as follows: 500,000 iterations with the ADAM optimiser in Tensorflow, an initial training rate of $2^{-4}$ and a batch size of 20. The hyper-parameters specific to the TAE are: $\gamma$ initially set to 0.01 and then linearly increased to 1 between 50,000 and 100,000 iterations, $\lambda = 2$ and $C = 10$. All experiments were performed using a TitanX GPU.

NYU Rooms Experiments: For these experiments, we take a subset of 3612 depth maps from the NYU raw data set. We slightly crop these along one dimension to obtain $480 \times 608$ images. The convolutional TAE and MVAE used to obtain the results of figure 7 were trained for 100,000 iterations using the ADAM optimiser in Tensorflow, with a batch size of 20 images and an initial training rate of $2 \times 10^{-2}$. For the warm-up, we initially set $\gamma = 0.01$ and linearly increase it to 1 between 10,000 and 20,000 iterations. For these experiments, $\lambda = 2$ and $C = 15$.
# C.3 EVALUATION ELBO

To evaluate the probabilistic performance of our method compared to others, we compute an evaluation ELBO which relies on test ground truths. After each model is trained unsupervisedly, we obtain a posterior of the form $q(x|y) = \int q(z|y)q(x|z)dz$, where for the MVAE and MIWAE, $q(x|z) = p(x|z)$. Given a test set of paired clean and corrupted samples $x_{t}$ and $y_{t}$, we construct a new parametric recognition model, which encodes latent distributions from ground truths, $q_{\eta}(z|x)$. We then optimise the following:
$$
\arg\max_{\eta} \; \mathbb{E}_{q_{\eta}(z|x_{t})} \log q\left(x_{t}|z\right) - KL\left(q_{\eta}(z|x_{t}) \,\|\, q(z|y_{t})\right). \tag{23}
$$
The above is a conditional VAE ELBO with conditional prior $q(z|y)$ and is a lower bound on the test likelihood we are interested in, $q(x_t|y_t)$. Note that we optimise over $\eta$ only; therefore the new recognition model $q(z|x)$ is the only one affected by this optimisation, and the components of our reconstruction model, $q(z|y)$ and $q(x|z)$, remain the same as trained with the unsupervised training set. As a result, this new optimisation only tightens the bound, rather than maximising the likelihood, which we want to evaluate as previously trained.
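A minimal sketch of this evaluation (our own names; it assumes the frozen networks return `torch.distributions.Normal` objects and that their parameters have `requires_grad=False`):

```python
import torch

# Evaluation ELBO of equation 23: optimise only the new encoder q_eta(z|x);
# q(z|y) and q(x|z) stay frozen from unsupervised training.
def evaluation_elbo(x_t, y_t, q_eta, q_zy, q_xz):
    qz = q_eta(x_t)                              # trainable q_eta(z | x_t)
    z = qz.rsample()
    rec = q_xz(z).log_prob(x_t).sum(-1)          # E_{q_eta} log q(x_t | z)
    with torch.no_grad():
        cond_prior = q_zy(y_t)                   # frozen conditional prior q(z | y_t)
    kl = torch.distributions.kl_divergence(qz, cond_prior).sum(-1)
    return (rec - kl).mean()                     # maximise over eta only
```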
# D ADDITIONAL EXPERIMENTS

# D.1 $C$ AND $\lambda$ CROSS-VALIDATION

$C$ and $\lambda$ in equation 22 are hyper-parameters of our inference algorithm and need to be user-defined. In our experiments, we determine their optimal values by cross-validation, as described in section C.
We report in figure 7 a cross-validation study where we measure the TAE ELBO for MNIST with $90\%$ missing values and additive noise.

Figure 7: ELBO for MNIST with $90\%$ missing values and additive noise as a function of the chosen hyper-parameters $C$ and $\lambda$ (in log scale). The performance of the TAE exceeds that of a standard VAE approach over a broad range of values. If the values are too large, the model collapses during optimisation, making such a situation easy to diagnose.

As shown in figure 7, the performance of TAEs is robust to variations in the hyper-parameters $C$ and $\lambda$ over a broad range of values. They also have an intuitive meaning that helps in their selection. In practice, $C$ controls the final value of the localisation and should be set as high as the stability of the optimisation allows, while $\lambda$ controls how fast the model is pushed to approach $C$.
# D.2 STRUCTURED MISSINGS

We test a TAE in a situation analogous to that shown in figure 4 of section 4, but with structured missing values instead of randomly missing ones. For each sample in MNIST, we only make visible a small $10 \times 10$ pixel window, randomly placed in each example, while the rest of the image remains hidden. In addition, the values in the observed window are subject to additive Gaussian noise, similarly to the missing-at-random case. Reconstructions with the competing MVAE and our TAE are shown in figure 8.

Similarly to the missing-at-random case, the MVAE collapses onto single solutions, giving draws from the posterior that are all very similar to each other. In contrast, the TAE gives more variation in the possible solutions, exploring the uncertainty in the solution space more appropriately. The MVAE ELBO over the clean data for this problem is 428, while that of the TAE is 638. The performance improvement provided by the TAE is analogous to that observed in the missing-at-random experiments.
Figure 8: Examples of Bayesian reconstructions with the MVAE and TAE on structured missing values. The MVAE returns good mean reconstructions, but its posteriors collapse onto single solutions, giving draws that are very similar to each other. The TAE returns posteriors which explore the different possible clean samples associated with the corrupted observations more broadly, giving more variation in the posterior's draws.
# D.3 MORE ELBO EVALUATIONS

Table 2: ELBO assigned by the retrieved posteriors to the ground-truth clean data.

<table><tr><td></td><td>MVAE</td><td>MIWAE</td><td>TAE</td></tr><tr><td>MNIST, 20% missing</td><td>883 ± 2</td><td>940 ± 3</td><td>1831 ± 8</td></tr><tr><td>MNIST, 50% missing</td><td>870 ± 6</td><td>917 ± 4</td><td>1719 ± 7</td></tr><tr><td>MNIST, 80% missing</td><td>803 ± 15</td><td>780 ± 6</td><td>1536 ± 14</td></tr><tr><td>Fashion-MNIST, 20% missing</td><td>775 ± 4</td><td>815 ± 4</td><td>1407 ± 24</td></tr><tr><td>Fashion-MNIST, 50% missing</td><td>757 ± 1</td><td>800 ± 7</td><td>1326 ± 7</td></tr><tr><td>Fashion-MNIST, 80% missing</td><td>723 ± 7</td><td>766 ± 8</td><td>1094 ± 13</td></tr><tr><td>UCI HAR, 20% missing</td><td>611 ± 3</td><td>628 ± 10</td><td>1039 ± 11</td></tr><tr><td>UCI HAR, 50% missing</td><td>585 ± 4</td><td>613 ± 6</td><td>1014 ± 6</td></tr><tr><td>UCI HAR, 80% missing</td><td>471 ± 10</td><td>584 ± 8</td><td>854 ± 52</td></tr></table>
# D.4 IMPUTATION WITHOUT NOISE

We carry out experiments on MNIST analogous to those shown in figure 3, but in the absence of noise, in order to test performance on imputation alone. Each tested ratio of observed entries is repeated three times, re-generating the patterns of missings each repeat in order to obtain error bars. Results are shown in figure 9.
Figure 9 (panels: (b) Mean Inference Performance; (c) Probabilistic Performance): Missing value imputation performance on MNIST in the absence of noise. As in the noisy case, the PSNR values of the MVAE and the TAE are very similar. The TAE presents significantly superior ELBO values at low ratios of observed entries, but in this case the gap closes as more entries are observed. This is because in the noiseless case the solution space when most entries are observed is much more localised than in the noisy case, and therefore the MVAE's collapsed posteriors fail less severely to capture it.
# D.5 DE-NOISING

We carry out experiments on MNIST analogous to those shown in figure 3, but testing fully observed images at different levels of noise. Each tested noise level is repeated three times, re-generating the noise for each repeat in order to obtain error bars. Results are shown in figure 10.

Figure 10: De-noising performance on MNIST. As in the missing value imputation case, the MVAE and TAE perform very similarly in their mean reconstructions, but the TAE is significantly better at capturing the distributions of clean solutions, as its test ELBO values are higher.
# D.6 NYU ROOMS RECOVERY EXAMPLES

Figure 11: Unsupervised missing value imputation with our TAE on raw depth maps from the NYU rooms data set, compared with a median filter approach and the standard MVAE. Missing pixels in the observed images are in white. The median filter results in overly smoothed images and is unable to fill pixels that are surrounded by large missing areas. The MVAE returns adequate reconstructions; however, it over-fits in certain locations and its uncertainty is largely over-estimated. The TAE returns good reconstructions and assigns high uncertainty to the locations where reconstruction is least accurate, as shown by the marginal standard deviations.
|
tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/images.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c80601f702c53c7d479a53942e0476f3598ab082a274242e822f6a27ae582dc2
+size 1220211

tomographicautoencoderunsupervisedbayesianrecoveryofcorrupteddata/layout.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84df90889647151de5cd772015aa896739eeed656facc6d11f60c46160b9c5a7
+size 794842

towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_content_list.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7814eea96acfd2ec6b2d1f9c35c185bcbf8bd96d45699e9542ab40f93fdbaf2
+size 72020

towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_model.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c0013247dcc30774e41e96acf8d32794f90d3a32a0cb1f4b51d522d2fc5d5fa7
+size 91897

towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/ed4f063f-eda6-43e6-9fed-7af237e31bcc_origin.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b71bd54421174653d6d3b83b298145de70f23374f80cf2303a89add45f35ef7e
+size 23355604
towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/full.md ADDED
@@ -0,0 +1,270 @@
# TOWARDS FASTER AND STABILIZED GAN TRAINING FOR HIGH-FIDELITY FEW-SHOT IMAGE SYNTHESIS

Bingchen Liu $^{1,2}$, Yizhe Zhu $^{2}$, Kunpeng Song $^{1,2}$, Ahmed Elgammal $^{1,2}$

$^{1}$ Playform - Artrendex Inc., USA
$^{2}$ Department of Computer Science, Rutgers University

{bingchen.liu,yizhe.zhu,kunpeng.song}@rutgers.edu
elgammal@artrendex.com

# ABSTRACT
Training Generative Adversarial Networks (GAN) on high-fidelity images usually requires large-scale GPU clusters and a vast number of training images. In this paper, we study the few-shot image synthesis task for GAN with minimum computing cost. We propose a light-weight GAN structure that gains superior quality at $1024 \times 1024$ resolution. Notably, the model converges from scratch with just a few hours of training on a single RTX-2080 GPU, and maintains consistent performance even with fewer than 100 training samples. Two technique designs constitute our work, a skip-layer channel-wise excitation module and a self-supervised discriminator trained as a feature-encoder. With thirteen datasets covering a wide variety of image domains<sup>1</sup>, we show our model's superior performance compared to the state-of-the-art StyleGAN2 when data and computing budget are limited.

# 1 INTRODUCTION
The fascinating ability to synthesize images using state-of-the-art (SOTA) Generative Adversarial Networks (GANs) (Goodfellow et al., 2014) displays the great potential of GANs for many intriguing real-life applications, such as image translation, photo editing, and artistic creation. However, the expensive computing cost and the vast amount of required training data limit these SOTA models in real applications with only small image sets and low computing budgets.

In real-life scenarios, the available samples to train a GAN can be minimal, such as the medical images of a rare disease, a particular celebrity's portrait set, or a specific artist's artworks. Transfer-learning with a pre-trained model (Mo et al., 2020; Wang et al., 2020) is one solution to the lack of training images. Nevertheless, there is no guarantee of finding a compatible pre-training dataset, and if none is found, fine-tuning probably leads to even worse performance (Zhao et al., 2020).
Figure 1: Synthetic results at $1024^{2}$ resolution from our model, trained from scratch on a single RTX 2080-Ti GPU with only 1000 images. Left: 20 hours on Nature photos; Right: 10 hours on FFHQ.

In a recent study, it was highlighted that in art creation applications, most artists prefer to train their models from scratch on their own images, to avoid biases from a fine-tuned pre-trained model. Moreover, it was shown that in most cases artists want to train their models with datasets of less than 100 images (Elgammal et al., 2020). Dynamic data-augmentation (Karras et al., 2020a; Zhao et al., 2020) smooths the gap and stabilizes GAN training with fewer images. However, the computing cost of SOTA models such as StyleGAN2 (Karras et al., 2020b) and BigGAN (Brock et al., 2019) remains high, especially when training at $1024 \times 1024$ resolution.
In this paper, our goal is to learn an unconditional GAN on high-resolution images, with low computational cost and few training samples. As summarized in Fig. 2, these training conditions expose the model to a high risk of overfitting and mode-collapse (Arjovsky & Bottou, 2017; Zhang & Khoreva, 2018). To train a GAN under such demanding conditions, we need a generator $(G)$ that can learn fast, and a discriminator $(D)$ that can continuously provide useful signals to train $G$. To address these challenges, we summarize our contributions as:

- We design the Skip-Layer channel-wise Excitation (SLE) module, which leverages low-scale activations to revise the channel responses on high-scale feature-maps. SLE allows a more robust gradient flow throughout the model weights for faster training. It also leads to automated learning of a style/content disentanglement, as in StyleGAN2.
- We propose a self-supervised discriminator $D$ trained as a feature-encoder with an extra decoder. We force $D$ to learn a more descriptive feature-map covering more regions of an input image, thus yielding more comprehensive signals to train $G$. We test multiple self-supervision strategies for $D$, among which we show that auto-encoding works the best.
- We build a computationally efficient GAN model based on the two proposed techniques, and show the model's robustness on multiple high-fidelity datasets, as demonstrated in Fig. 1.
# 2 RELATED WORKS

Speed up the GAN training: Speeding up the training of GAN has been approached from various perspectives. Ngxande et al. propose to reduce the computing time with depth-wise convolutions. Zhong et al. adjust the GAN objective into a min-max-min problem for a shorter optimization path. Sinha et al. suggest preparing each batch of training samples via core-set selection, leveraging the better data preparation for faster convergence. However, these methods only bring a limited improvement in training speed. Moreover, the synthesis quality is not advanced within the shortened training time.

Figure 2: The causes and challenges for training GAN in our studied conditions.
Train GAN on high resolution: High-resolution training for GAN can be problematic. Firstly, the increased model parameters lead to a more rigid gradient flow to optimize $G$. Secondly, the target distribution formed by images at $1024 \times 1024$ resolution is extremely sparse, making GAN much harder to converge. Denton et al. (2015); Zhang et al. (2017); Huang et al. (2017); Wang et al. (2018); Karras et al. (2019); Karnewar & Wang (2020); Karras et al. (2020b); Liu et al. (2021) develop multi-scale GAN structures to alleviate the gradient flow issue, where $G$ outputs images and receives feedback at several resolutions simultaneously. However, all these approaches further increase the computational cost, consuming even more GPU memory and training time.

Stabilize the GAN training: Mode-collapse on $G$ is one of the big challenges when training GANs, and it becomes even more severe given fewer training samples and a lower computational budget (a smaller batch-size), as $D$ is more likely to overfit the dataset and thus become unable to provide meaningful gradients to train $G$ (Gulrajani et al., 2017).
Prior works tackle the overfitting issue by seeking a good regularization for $D$, including different objectives (Arjovsky et al., 2017; Lim & Ye, 2017; Tran et al., 2017); regularizing the gradients (Gulrajani et al., 2017; Mescheder et al., 2018); normalizing the model weights (Miyato et al., 2018); and augmenting the training data (Karras et al., 2020a; Zhao et al., 2020). However, the effect of these methods degrades fast when the training batch-size is limited, since appropriate batch statistics can hardly be calculated for the regularization (normalization) over the training iterations.

Meanwhile, self-supervision on $D$ has been shown to be an effective method to stabilize GAN training, as studied in Tran et al. (2019); Chen et al. (2019). However, the auxiliary self-supervision tasks in prior works are limited in their usage scenarios and image domains. Moreover, prior works only studied low-resolution images ($32^2$ to $128^2$), and without a computing resource limitation.

# 3 METHOD

We adopt a minimalistic design for our model. In particular, we use a single conv-layer at each resolution in $G$, and apply only three (input and output) channels for the conv-layers at the high resolutions ($\geq 512 \times 512$) in both $G$ and $D$. Fig. 3 and Fig. 4 illustrate the model structures of $G$ and $D$, with descriptions of the component layers and forward flow. These structural designs make our GAN much smaller than SOTA models and substantially faster to train. Meanwhile, our model remains robust on small datasets thanks to its compact size and the two proposed techniques.
Figure 3: The structure of the skip-layer excitation module and the Generator. Yellow boxes represent feature-maps (we show the spatial size and omit the channel number), blue box and blue arrows represent the same up-sampling structure, red box contains the SLE module as illustrated on the left.
# 3.1 SKIP-LAYER CHANNEL-WISE EXCITATION

For synthesizing higher-resolution images, the generator $G$ inevitably needs to become deeper, with more conv-layers, in concert with the up-sampling needs. A deeper model with more convolution layers leads to a longer GAN training time, due to the increased number of model parameters and a weaker gradient flow through $G$ (Zhang et al., 2017; Karras et al., 2018; Karnewar & Wang, 2020). To better train a deep model, He et al. design the Residual structure (ResBlock), which uses a skip-layer connection to strengthen the gradient signals between layers. However, while ResBlock has been widely used in the GAN literature (Wang et al., 2018; Karras et al., 2020b), it also increases the computation cost.

We reformulate the skip-connection idea with two unique designs into the Skip-Layer Excitation module (SLE). First, ResBlock implements the skip-connection as an element-wise addition between activations from different conv-layers, which requires the spatial dimensions of the activations to be the same. Instead of addition, we apply channel-wise multiplication between the activations, eliminating the heavy computation of convolution (since one side of the activations now has a spatial dimension of $1^{2}$). Second, in prior GAN works, skip-connections are only used within the same resolution. In contrast, we perform skip-connection between resolutions over a much longer range (e.g., $8^{2}$ and $128^{2}$, $16^{2}$ and $256^{2}$), since an equal spatial dimension is no longer required. The two designs make SLE inherit the advantages of ResBlock, with a shortcut gradient flow, without the extra computational burden.

Formally, we define the Skip-Layer Excitation module as:
$$
\mathbf{y} = \mathcal{F}\left(\mathbf{x}_{\text{low}}, \{\mathbf{W}_i\}\right) \cdot \mathbf{x}_{\text{high}} \tag{1}
$$

Here $\mathbf{x}$ and $\mathbf{y}$ are the input and output feature-maps of the SLE module, the function $\mathcal{F}$ contains the operations on $\mathbf{x}_{low}$, and $\mathbf{W}_i$ indicates the module weights to be learned. The left panel in Fig. 3 shows an SLE module in practice, where $\mathbf{x}_{low}$ and $\mathbf{x}_{high}$ are the feature-maps at $8 \times 8$ and $128 \times 128$ resolution respectively. An adaptive average-pooling layer in $\mathcal{F}$ first down-samples $\mathbf{x}_{low}$ to $4 \times 4$ along the spatial dimensions, then a conv-layer further down-samples it to $1 \times 1$. A LeakyReLU is used to model the non-linearity, and another conv-layer projects $\mathbf{x}_{low}$ to have the same channel size as $\mathbf{x}_{high}$. Finally, after a gating operation via a Sigmoid function, the output from $\mathcal{F}$ multiplies $\mathbf{x}_{high}$ along the channel dimension, yielding $\mathbf{y}$ with the same shape as $\mathbf{x}_{high}$.
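The following is a minimal PyTorch-style sketch of an SLE module implementing equation 1 as just described; the class name and channel sizes are our own illustration, not the paper's released code:

```python
import torch.nn as nn

class SLE(nn.Module):
    """Skip-Layer Excitation: gate x_high channel-wise from x_low (eq. 1)."""
    def __init__(self, ch_low, ch_high):
        super().__init__()
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(4),              # x_low -> 4 x 4
            nn.Conv2d(ch_low, ch_low, 4, 1, 0),   # 4 x 4 -> 1 x 1
            nn.LeakyReLU(0.1),
            nn.Conv2d(ch_low, ch_high, 1, 1, 0),  # project to x_high's channels
            nn.Sigmoid(),                         # gating in (0, 1)
        )

    def forward(self, x_low, x_high):
        # (B, ch_high, 1, 1) gate broadcasts over x_high's spatial dimensions
        return x_high * self.gate(x_low)
```

For example, `SLE(64, 32)` would gate a $128 \times 128$, 32-channel feature-map with an $8 \times 8$, 64-channel one, returning an output of the same shape as the high-resolution input.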
SLE partially resembles the Squeeze-and-Excitation module (SE) proposed by Hu et al. However, SE operates within one feature-map as a self-gating module. In comparison, SLE performs between feature-maps that are far away from each other. While SLE brings the benefit of channel-wise feature re-calibration just like SE, it also strengthens the whole model's gradient flow like ResBlock. The channel-wise multiplication in SLE also coincides with Instance Normalization (Ulyanov et al., 2016; Huang & Belongie, 2017), which is widely used in style-transfer. Similarly, we show that SLE enables $G$ to automatically disentangle content and style attributes, just like StyleGAN (Karras et al., 2019). Since SLE performs on high-resolution feature-maps, and altering these feature-maps is more likely to change the style attributes of the generated image (Karras et al., 2019; Liu et al., 2021), replacing $\mathbf{x}_{low}$ in SLE with that from another synthesized sample lets our $G$ generate an image with the content unchanged, but in the style of the replacing image.
# 3.2 SELF-SUPERVISED DISCRIMINATOR

Our approach to providing a strong regularization for $D$ is surprisingly simple. We treat $D$ as an encoder and train it with small decoders. Such auto-encoding training forces $D$ to extract image features from which the decoders can produce good reconstructions. The decoders are optimized together with $D$ on a simple reconstruction loss, which is computed only on real samples:
$$
\mathcal{L}_{\text{recons}} = \mathbb{E}_{\mathbf{f} \sim D_{\text{encode}}(x),\ x \sim I_{\text{real}}} \left[ \| \mathcal{G}(\mathbf{f}) - \mathcal{T}(x) \| \right], \tag{2}
$$

where $\mathbf{f}$ is the intermediate feature-map from $D$, the function $\mathcal{G}$ contains the processing on $\mathbf{f}$ and the decoder, and the function $\mathcal{T}$ represents the processing on a sample $x$ from the real images $I_{real}$.
Figure 4: The structure and the forward flow of the Discriminator. Blue boxes and arrows represent the same residual down-sampling structure, green boxes the same decoder structure.

Our self-supervised $D$ is illustrated in Fig. 4, where we employ two decoders for the feature-maps at two scales: $\mathbf{f}_1$ at $16^2$ and $\mathbf{f}_2$ at $8^2$. The decoders only have four conv-layers to produce images at $128 \times 128$ resolution, adding little extra computation (much less than other regularization methods). We randomly crop $\mathbf{f}_1$ to $\frac{1}{8}$ of its height and width, then crop the real image at the same portion to get $I_{part}$. We resize the real image to get $I$. The decoders produce $I_{part}^{\prime}$ from the cropped $\mathbf{f}_1$, and $I^{\prime}$ from $\mathbf{f}_2$. Finally, $D$ and the decoders are trained together to minimize the loss in eq. 2, by matching $I_{part}^{\prime}$ to $I_{part}$ and $I^{\prime}$ to $I$.
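A minimal sketch of this reconstructive regularization (our own names and crop arithmetic; the choice of an $\ell_1$ distance for the norm in eq. 2 is also our assumption):

```python
import torch
import torch.nn.functional as F

# L_recons of eq. 2: decode a random 1/8 crop of f1 (16x16) and all of f2
# (8x8), and match them to the corresponding crop / resize of the real image.
def recons_loss(f1, f2, real, dec_part, dec_full):
    B, _, H, W = real.shape
    h = torch.randint(0, 15, (1,)).item()        # 16 - 2 + 1 valid offsets
    w = torch.randint(0, 15, (1,)).item()
    f1_crop = f1[:, :, h:h + 2, w:w + 2]         # 2x2 = 1/8 of 16x16
    i_part = real[:, :, h * H // 16:(h + 2) * H // 16,
                        w * W // 16:(w + 2) * W // 16]
    i_part = F.interpolate(i_part, size=128)     # I_part at 128x128
    i_full = F.interpolate(real, size=128)       # I at 128x128
    return (F.l1_loss(dec_part(f1_crop), i_part)
            + F.l1_loss(dec_full(f2), i_full))
```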
Such reconstructive training makes sure that $D$ extracts a more comprehensive representation of its inputs, covering both the overall composition (from $\mathbf{f}_2$) and detailed textures (from $\mathbf{f}_1$). Note that the processing in $\mathcal{G}$ and $\mathcal{T}$ is not limited to cropping; more operations remain to be explored for better performance. The auto-encoding approach we employ is a typical method for self-supervised learning, which has been well recognized to improve model robustness and generalization ability (He et al., 2020; Hendrycks et al., 2019; Jing & Tian, 2020; Goyal et al., 2019). In the context of GAN, we find that a $D$ regularized via self-supervised training strategies significantly improves the synthesis quality of $G$, among which auto-encoding brings the largest performance boost.

Although our self-supervision strategy for $D$ comes in the form of an auto-encoder (AE), this approach is fundamentally different from works trying to combine GAN and AE (Larsen et al., 2016; Guo et al., 2019; Zhao et al., 2016; Berthelot et al., 2017). The latter works mostly train $G$ as a decoder on a latent space learned by $D$, or treat the adversarial training with $D$ as a supplementary loss besides the AE training. In contrast, our model is a pure GAN with a much simpler training schema. The auto-encoding training is only for regularizing $D$; $G$ is not involved.
In sum, we employ the hinge version of the adversarial loss (Lim & Ye (2017); Tran et al. (2017)) to iteratively train our $D$ and $G$. We find that different GAN losses make little difference in performance, while the hinge loss computes the fastest:

$$
\mathcal{L}_{D} = -\mathbb{E}_{x \sim I_{\text{real}}} [\min(0, -1 + D(x))] - \mathbb{E}_{\hat{x} \sim G(z)} [\min(0, -1 - D(\hat{x}))] + \mathcal{L}_{\text{recons}} \tag{3}
$$

$$
\mathcal{L}_{G} = -\mathbb{E}_{z \sim \mathcal{N}} [D(G(z))], \tag{4}
$$
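In code form, equations 3 and 4 are the standard hinge objectives plus the reconstruction term; the sketch below uses our own function names and assumes `d_real` and `d_fake` are the discriminator outputs:

```python
import torch.nn.functional as F

# Eq. 3: L_D = E[max(0, 1 - D(x))] + E[max(0, 1 + D(x_hat))] + L_recons
def d_hinge_loss(d_real, d_fake, l_recons):
    return F.relu(1.0 - d_real).mean() + F.relu(1.0 + d_fake).mean() + l_recons

# Eq. 4: L_G = -E[D(G(z))]
def g_hinge_loss(d_fake):
    return -d_fake.mean()
```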
# 4 EXPERIMENT

Datasets: We conduct experiments on multiple datasets with a wide range of content categories. At $256 \times 256$ resolution, we test on Animal-Face Dog and Cat (Si & Zhu, 2011), 100-Shot-Obama, Panda, and Grumpy-cat (Zhao et al., 2020). At $1024 \times 1024$ resolution, we test on Flickr-Face-HQ (FFHQ) (Karras et al., 2019), Oxford-flowers (Nilsback & Zisserman, 2006), art paintings from WikiArt (wikiart.org), photographs of natural landscapes from Unsplash (unsplash.com), Pokemon (pokemon.com), anime face, skull, and shell. These datasets are designed to cover images with different characteristics: photo-realistic, graphic-illustration, and art-like images.
Metrics: We use two metrics to measure the models' synthesis performance: 1) Fréchet Inception Distance (FID) (Heusel et al., 2017) measures the overall semantic realism of the synthesized images. For datasets with fewer than 1000 images (most only have 100 images), we let $G$ generate 5000 images and compute FID between the synthesized images and the whole training set. 2) Learned perceptual similarity (LPIPS) (Zhang et al., 2018) provides a perceptual distance between two images. We use LPIPS to report the reconstruction quality when we perform latent space back-tracking on $G$ given real images, and to measure the auto-encoding performance. We find it unnecessary to involve other metrics, as FID is unlikely to be inconsistent with the others given the notable performance gap between our model and the compared ones. For all the tests, we train the models 5 times with random seeds and report the best scores. The relative error is less than five percent on average.

Compared Models: We compare our model with: 1) the state-of-the-art (SOTA) unconditional model, StyleGAN2, and 2) a baseline model ablated from our proposed one. Note that we adopt StyleGAN2 with recent findings from (Karras et al., 2020a; Zhao et al., 2020), including the model configuration and differentiable data-augmentation, for the best training on few-sample datasets. Since StyleGAN2 requires much more computing cost (cc) to train, we derive an extra baseline model. In sum, we compare our model with StyleGAN2 on absolute image synthesis quality regardless of cc, and use the baseline model for reference within a comparable cc range.

The baseline model is the strongest performer that we could integrate from various GAN techniques based on DCGAN (Radford et al., 2015): 1) spectral-normalization (Miyato et al., 2018), 2) exponential-moving-average (Yazici et al., 2018) optimization on $G$, 3) differentiable-augmentation, 4) GLU (Dauphin et al., 2017) instead of ReLU in $G$. We build our model upon the baseline with the two proposed techniques: the skip-layer excitation module and the self-supervised discriminator.
Table 1: Computational cost comparison of the models.

<table><tr><td></td><td></td><td>StyleGAN2@0.25</td><td>StyleGAN2@0.5</td><td>StyleGAN2</td><td>Baseline</td><td>Ours</td></tr><tr><td>Resolution: $256^2$</td><td>Training time (hour / 10k iter)</td><td>1</td><td>1.8</td><td>3.8</td><td>0.7</td><td>1</td></tr><tr><td rowspan="2">Batch-size: 8</td><td>Training vram (GB)</td><td>7</td><td>16</td><td>18</td><td>5</td><td>6.5</td></tr><tr><td>Model parameters (million)</td><td>27.557</td><td>45.029</td><td>108.843</td><td>44.359</td><td>47.363</td></tr><tr><td>Resolution: $1024^2$</td><td>Training time (hour / 10k iter)</td><td>3.6</td><td>5</td><td>7</td><td>1.3</td><td>1.7</td></tr><tr><td rowspan="2">Batch-size: 8</td><td>Training vram (GB)</td><td>12</td><td>23</td><td>36</td><td>9</td><td>10</td></tr><tr><td>Model parameters (million)</td><td>27.591</td><td>45.15</td><td>109.229</td><td>44.377</td><td>47.413</td></tr></table>
Table 1 presents the normalized cc figures of the models on Nvidia's RTX 2080-Ti GPU, implemented using PyTorch (Paszke et al., 2017). Importantly, the slimmed StyleGAN2 with $\frac{1}{4}$ of the parameters cannot converge on the tested datasets at $1024^2$ resolution. We compare to the StyleGAN2 with $\frac{1}{2}$ of the parameters (if not specifically mentioned) in the following experiments.

# 4.1 IMAGE SYNTHESIS PERFORMANCE

Few-shot generation: Collecting a large-scale image dataset is expensive, or even impossible, for a certain character, a genre, or a topic. On those few-shot datasets, a data-efficient model becomes especially valuable for the image generation task. In Table 2 and Table 3, we show that our model not only achieves superior performance on the few-shot datasets, but is also much more computationally efficient than the compared methods. We save checkpoints every 10k iterations during training and report the best FID among the checkpoints (which occurs only after at least 15 hours of training for StyleGAN2 on all datasets). Among the 12 datasets, our model performs the best on 10 of them.

Please note that, due to the VRAM requirement of StyleGAN2 when trained at $1024^2$ resolution, we have to train the models in Table 3 on an RTX TITAN GPU. In practice, the 2080-Ti and TITAN share a similar performance, and our model runs in the same time on both GPUs.
Table 2: FID comparison at $256^2$ resolution on few-sample datasets.

<table><tr><td colspan="3"></td><td>Animal Face - Dog</td><td>Animal Face - Cat</td><td>Obama</td><td>Panda</td><td>Grumpy-cat</td></tr><tr><td colspan="3">Image number</td><td>389</td><td>160</td><td>100</td><td>100</td><td>100</td></tr><tr><td rowspan="6">Training time on one RTX 2080-Ti</td><td rowspan="2">20 hour</td><td>StyleGAN2</td><td>58.85</td><td>42.44</td><td>46.87</td><td>12.06</td><td>27.08</td></tr><tr><td>StyleGAN2 finetune</td><td>61.03</td><td>46.07</td><td>35.75</td><td>14.5</td><td>29.34</td></tr><tr><td rowspan="4">5 hour</td><td>Baseline</td><td>108.19</td><td>150.3</td><td>62.74</td><td>15.4</td><td>42.13</td></tr><tr><td>Baseline+Skip</td><td>94.21</td><td>72.97</td><td>52.50</td><td>14.39</td><td>38.17</td></tr><tr><td>Baseline+decode</td><td>56.25</td><td>36.74</td><td>44.34</td><td>10.12</td><td>29.38</td></tr><tr><td>Ours (B+Skip+decode)</td><td>50.66</td><td>35.11</td><td>41.05</td><td>10.03</td><td>26.65</td></tr></table>
Training from scratch vs. fine-tuning: Fine-tuning from a pre-trained GAN (Mo et al., 2020; Noguchi & Harada, 2019; Wang et al., 2020) has been the go-to method for the image generation task on datasets with few samples. However, its performance highly depends on the semantic consistency between the new dataset and the available pre-trained model. According to Zhao et al., fine-tuning performs worse than training from scratch in most cases when the content of the new dataset strays away from the original one. We confirm the limitation of current fine-tuning methods in Table 2 and Table 3, where we fine-tune a StyleGAN2 trained on FFHQ using the Freeze-D method from Mo et al. Among all the tested datasets, only Obama and Skull favor the fine-tuning method, which makes sense since these two sets share the most similar content with FFHQ.

Module ablation study: We experiment with the two proposed modules in Table 2, where both SLE (skip) and decoding-on-$D$ (decode) separately boost the model performance. This shows that the two modules are orthogonal to each other in improving the model performance, with the self-supervised $D$ making the biggest contribution. Importantly, the baseline model and StyleGAN2 diverge fast after the listed training time. In contrast, our model is less likely to mode-collapse on the tested datasets: unlike the baseline model, which usually mode-collapses after training for 10 hours, our model maintains a good synthesis quality and does not collapse even after 20 hours of training. We argue that it is the decoding regularization on $D$ that prevents the model from diverging.
Table 3: FID comparison at $1024^2$ resolution on few-sample datasets.

<table><tr><td colspan="3"></td><td>Art Paintings</td><td>FFHQ</td><td>Flower</td><td>Pokemon</td><td>Anime Face</td><td>Skull</td><td>Shell</td></tr><tr><td colspan="3">Image number</td><td>1000</td><td>1000</td><td>1000</td><td>800</td><td>120</td><td>100</td><td>60</td></tr><tr><td rowspan="4">Training time on one RTX TITAN</td><td rowspan="2">24 hour</td><td>StyleGAN2</td><td>74.56</td><td>25.66</td><td>45.23</td><td>190.23</td><td>152.73</td><td>127.98</td><td>241.37</td></tr><tr><td>StyleGAN2 finetune</td><td>N/A</td><td>N/A</td><td>36.72</td><td>60.12</td><td>61.23</td><td>107.68</td><td>220.45</td></tr><tr><td rowspan="2">8 hour</td><td>Baseline</td><td>62.27</td><td>38.35</td><td>42.25</td><td>67.86</td><td>101.23</td><td>186.45</td><td>202.32</td></tr><tr><td>Ours</td><td>45.08</td><td>24.45</td><td>25.66</td><td>57.19</td><td>59.38</td><td>130.05</td><td>155.47</td></tr></table>

Table 4: FID comparison at $1024^2$ resolution on datasets with more images.

<table><tr><td rowspan="2">Model</td><td>Dataset</td><td colspan="3">Art Paintings</td><td colspan="3">FFHQ</td><td colspan="3">Nature Photograph</td></tr><tr><td>Image number</td><td>2k</td><td>5k</td><td>10k</td><td>2k</td><td>5k</td><td>10k</td><td>70k</td><td>2k</td><td>5k</td></tr><tr><td colspan="2">StyleGAN2</td><td>70.02</td><td>48.36</td><td>41.23</td><td>18.38</td><td>10.45</td><td>7.86</td><td>4.4</td><td>67.12</td><td>41.47</td></tr><tr><td colspan="2">Baseline</td><td>60.02</td><td>51.23</td><td>49.38</td><td>36.45</td><td>27.86</td><td>25.12</td><td>17.62</td><td>71.47</td><td>66.05</td></tr><tr><td colspan="2">Ours</td><td>44.57</td><td>43.27</td><td>42.53</td><td>19.01</td><td>17.93</td><td>16.45</td><td>12.38</td><td>52.47</td><td>45.07</td></tr></table>
Figure 6: Latent space back-tracking and interpolation.

Table 5: LPIPS of back-tracking with $G$.

<table><tr><td></td><td>Cat</td><td>Dog</td><td>FFHQ</td><td>Art</td></tr><tr><td>Resolution</td><td colspan="2">256</td><td colspan="2">1024</td></tr><tr><td>Baseline @ 20k iter</td><td>2.113</td><td>2.073</td><td>2.589</td><td>2.916</td></tr><tr><td>Baseline @ 40k iter</td><td>2.513</td><td>2.171</td><td>2.583</td><td>2.812</td></tr><tr><td>Ours @ 40k iter</td><td>1.821</td><td>1.918</td><td>2.425</td><td>2.624</td></tr><tr><td>Ours @ 80k iter</td><td>1.897</td><td>1.986</td><td>2.342</td><td>2.601</td></tr></table>

Table 6: FID of self-supervisions for $D$.

<table><tr><td></td><td>Art paintings</td><td>Nature photos</td></tr><tr><td>a. contrastive loss</td><td>47.14</td><td>57.04</td></tr><tr><td>b. predict aspect ratio</td><td>49.21</td><td>59.22</td></tr><tr><td>c. auto-encoding</td><td>42.53</td><td>43.65</td></tr><tr><td>d. a+b</td><td>46.02</td><td>54.23</td></tr><tr><td>e. a+b+c</td><td>44.21</td><td>47.65</td></tr></table>
Training with more images: For a more thorough evaluation, we also test our model on datasets with more training samples, as shown in Table 4. We train the full StyleGAN2 for around five days on the Art and Photograph datasets with a batch-size of 16 on two TITAN RTX GPUs, and use the latest official figures on FFHQ from Zhao et al. Instead, we train our model for only 24 hours, with a batch-size of 8 on a single 2080-Ti GPU. Specifically, for FFHQ with all 70000 images, we train our model with a larger batch-size of 32, to reflect an optimal performance of our model.

In this test, we follow the common practice of computing FID by generating 50k images and using the whole training set as the reference distribution. Note that StyleGAN2 has more than double the parameters of our model and is trained with a much larger batch-size on FFHQ. These factors contribute to its better performance when given enough training samples and computing power. Meanwhile, our model keeps up well with StyleGAN2 across all tests at a considerably lower computing budget, showing a compelling performance even on larger-scale datasets, and a consistent performance boost over the baseline model.

Qualitative results: The advantage of our model becomes clearer in the qualitative comparisons of Fig. 5. Given the same batch-size and training time, StyleGAN2 either converges more slowly or suffers from mode collapse. In contrast, our model consistently generates satisfactory images. Note that the best results from our model on Flower, Shell, and Pokemon take only three hours of training, and for the remaining three datasets the best performance is achieved after eight hours of training. For StyleGAN2 on "shell", "banana face", and "Pokemon", the images shown in Fig. 5 are already from the best epoch, matching the scores in Table 2 and Table 3. For the rest of the datasets, the quality increase from StyleGAN2 is also limited given more training time.
# 4.2 MORE ANALYSIS AND APPLICATIONS

Testing mode collapse with back-tracking: From a well-trained GAN, one can take a real image and invert it back to a vector in the latent space of $G$, thus editing the image's content by altering the back-tracked vector. Despite the various back-tracking methods (Zhu et al., 2016; Lipton & Tripathi, 2017; Zhu et al., 2020; Abdal et al., 2019), a well-generalized $G$ is arguably just as important for good inversions. To this end, we show that our model, although trained on limited image samples, still achieves a desirable performance on real-image back-tracking.

In Table 5, we split the images of each dataset with a training/testing ratio of 9:1 and train $G$ on the training set. We compute a reconstruction error between all the images from the testing set and their inversions from $G$, after the same 1000 iterations of updates on the latent vectors (to prevent the vectors from straying far off the normal distribution). The baseline model's performance worsens with more training iterations, which reflects mode-collapse on $G$. In contrast, our model gives better reconstructions, with a consistent performance over more training iterations. Fig. 6 presents the back-tracked examples (left-most and right-most samples in the middle panel) given the real images.
Figure 5: Qualitative comparison between our model and StyleGAN2 on $1024^2$ resolution datasets. The left-most panel shows the training images, and the right two panels show the uncurated samples from StyleGAN2 and our model. Both models are trained from scratch for 10 hours with a batch-size of 8. The samples are generated from the checkpoint with the lowest FID.

The smooth interpolations from the back-tracked latent vectors also suggest little mode-collapse of our $G$ (Radford et al., 2015; Zhao et al., 2020; Robb et al., 2020).
|
| 177 |
+
|
| 178 |
+
In addition, we show qualitative comparisons in appendix D, where our model maintains a good generation while StyleGAN2 and baseline are model-collapsed.
|
| 179 |
+
|
| 180 |
+
The self-supervision methods and generalization ability on $D$ : Apart from the auto-encoding training for $D$ , we show that $D$ with other common self-supervising strategies also boost GAN's performance in our training settings. We test five self-supervision settings, as shown in Table 6, which all brings a substantial performance boost compared to the baseline model. Specifically, setting-a refers to contrastive learning which we treat each real image as a unique class and let $D$ classify them. For setting-b, we train $D$ to predict the real image's original aspect-ratio since they are reshaped to square when fed to $D$ . Setting-c is the method we employ in our model, which
|
| 181 |
+
|
| 182 |
+

Figure 7: Style-mixing results from our model trained for only 5 hours on a single GPU. Panels, top to bottom: AnimalFace - Cat, AnimalFace - Dog, Obama, Art Painting, Shell, Pokemon.

Interestingly, according to Table 6, while setting-c performs the best, combining it with the other two settings leads to a clear performance downgrade. Similar behavior can be found with some other self-supervision settings; e.g., when following Chen et al. (2019) with a rotation-prediction task on the art-paintings and FFHQ datasets, we observe a performance downgrade even compared to the baseline model. We hypothesize that auto-encoding forces $D$ to pay attention to more areas of the input image, and thus to extract a more comprehensive feature-map describing the input (for a good reconstruction). In contrast, a classification task does not guarantee that $D$ covers the whole image. Instead, the task drives $D$ to focus only on small regions, because the model can find class cues in small regions of the images. Focusing on limited regions (i.e., reacting to limited image patterns) is a typical overfitting behavior, which is also widespread for $D$ in vanilla GANs. More discussion can be found in appendix B.

Style mixing like StyleGAN. With the channel-wise excitation module, our model gains the same functionality as StyleGAN: it learns to disentangle the images' high-level semantic attributes (style and content) in an unsupervised way, from $G$'s conv-layers at different scales. The style-mixing results are displayed in Fig. 7, where the top three datasets are $256 \times 256$ resolution and the bottom three are $1024 \times 1024$ resolution. While StyleGAN2 struggles to converge on the bottom high-resolution datasets, our model successfully learns the style representations along the channel dimension on the "excited" layers (i.e., for feature-maps at $256 \times 256$ and $512 \times 512$ resolution). Please refer to appendix A and C for more information on SLE and style-mixing.

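Concretely, style mixing here amounts to computing the channel-wise excitation from one sample's low-resolution features while the main path carries another sample. The sketch below is a hedged illustration under an assumed generator interface; every name (`G.features`, `G.sle_256`, `G.sle_512`, `G.up_512`, `G.to_image`) is a hypothetical stand-in for the SLE wiring detailed in appendix A.

```python
import torch

@torch.no_grad()
def style_mix(G, z_content, z_style):
    """Illustrative style mixing via SLE channel gates.

    Hypothetical API: `G.features(z)` returns a dict of per-resolution
    feature maps; `G.sle_256` / `G.sle_512` are the skip-layer excitation
    modules producing channel-wise gates from low-resolution features;
    `G.up_512` and `G.to_image` are the remaining generator stages.
    """
    feats_c = G.features(z_content)     # content sample's feature maps
    feats_s = G.features(z_style)       # style sample's feature maps
    # Re-excite the content path with the style sample's low-res features,
    # so style is swapped only on the "excited" 256/512 layers.
    f256 = feats_c[256] * G.sle_256(feats_s[16])
    f512 = G.up_512(f256) * G.sle_512(feats_s[32])
    return G.to_image(f512)
```
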
# 5 CONCLUSION

We introduce two techniques that stabilize GAN training with improved synthesis quality, given fewer than one hundred high-fidelity images and limited computing resources. On thirteen datasets with diverse content variation, we show that a skip-layer channel-wise excitation mechanism (SLE) and a self-supervised regularization on the discriminator significantly boost the synthesis performance of GAN. Both proposed techniques require only minor changes to a vanilla GAN, enhancing GAN's practicality with a desirable plug-and-play property. We hope this work can benefit downstream tasks of GAN and provide new study perspectives for future research.

# REFERENCES

Rameen Abdal, Yipeng Qin, and Peter Wonka. Image2stylegan: How to embed images into the stylegan latent space? In Proceedings of the IEEE International Conference on Computer Vision, pp. 4432-4441, 2019.

Martin Arjovsky and Léon Bottou. Towards principled methods for training generative adversarial networks. In International Conference on Learning Representations, 2017.

Martin Arjovsky, Soumith Chintala, and Léon Bottou. Wasserstein generative adversarial networks. In International Conference on Machine Learning, pp. 214-223. PMLR, 2017.

David Berthelot, Thomas Schumm, and Luke Metz. Began: Boundary equilibrium generative adversarial networks. arXiv preprint arXiv:1703.10717, 2017.

Andrew Brock, Jeff Donahue, and Karen Simonyan. Large scale GAN training for high fidelity natural image synthesis. In International Conference on Learning Representations, 2019.

Ting Chen, Xiaohua Zhai, Marvin Ritter, Mario Lucic, and Neil Houlsby. Self-supervised gans via auxiliary rotation loss. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 12154-12163, 2019.

Yann N. Dauphin, Angela Fan, Michael Auli, and David Grangier. Language modeling with gated convolutional networks. In International Conference on Machine Learning, pp. 933-941, 2017.

Emily L. Denton, Soumith Chintala, Rob Fergus, et al. Deep generative image models using a laplacian pyramid of adversarial networks. In Advances in Neural Information Processing Systems, pp. 1486-1494, 2015.

Ahmed Elgammal, Marian Mazzone, et al. Artists, artificial intelligence and machine-based creativity in playform. Artnodes, (26):1-8, 2020.

Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems, pp. 2672-2680, 2014.

Priya Goyal, Dhruv Mahajan, Abhinav Gupta, and Ishan Misra. Scaling and benchmarking self-supervised visual representation learning. In Proceedings of the IEEE International Conference on Computer Vision, pp. 6391-6400, 2019.

Ishaan Gulrajani, Faruk Ahmed, Martin Arjovsky, Vincent Dumoulin, and Aaron C. Courville. Improved training of wasserstein gans. In Advances in Neural Information Processing Systems, pp. 5767-5777, 2017.

Yong Guo, Qi Chen, Jian Chen, Qingyao Wu, Qinfeng Shi, and Mingkui Tan. Auto-embedding generative adversarial networks for high resolution image synthesis. IEEE Transactions on Multimedia, 21(11):2726-2737, 2019.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.

Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9729-9738, 2020.

Dan Hendrycks, Mantas Mazeika, Saurav Kadavath, and Dawn Song. Using self-supervised learning can improve model robustness and uncertainty. In Advances in Neural Information Processing Systems, pp. 15663-15674, 2019.

Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. In Advances in Neural Information Processing Systems, pp. 6626-6637, 2017.

Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132-7141, 2018.

Xun Huang and Serge Belongie. Arbitrary style transfer in real-time with adaptive instance normalization. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1501-1510, 2017.

Xun Huang, Yixuan Li, Omid Poursaeed, John Hopcroft, and Serge Belongie. Stacked generative adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 5077-5086, 2017.

Longlong Jing and Yingli Tian. Self-supervised visual feature learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.

Animesh Karnewar and Oliver Wang. Msg-gan: Multi-scale gradients for generative adversarial networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 7799-7808, 2020.

Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. Progressive growing of GANs for improved quality, stability, and variation. In International Conference on Learning Representations, 2018.

Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4401-4410, 2019.

Tero Karras, Miika Aittala, Janne Hellsten, Samuli Laine, Jaakko Lehtinen, and Timo Aila. Training generative adversarial networks with limited data. arXiv preprint arXiv:2006.06676, 2020a.

Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. Analyzing and improving the image quality of stylegan. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8110-8119, 2020b.

Anders Boesen Lindbo Larsen, Søren Kaae Sønderby, Hugo Larochelle, and Ole Winther. Autoencoding beyond pixels using a learned similarity metric. In International Conference on Machine Learning, pp. 1558-1566. PMLR, 2016.

Jae Hyun Lim and Jong Chul Ye. Geometric gan. arXiv preprint arXiv:1705.02894, 2017.

Zachary C. Lipton and Subarna Tripathi. Precise recovery of latent vectors from generative adversarial networks. In ICLR Workshop, 2017.

Bingchen Liu, Kunpeng Song, Yizhe Zhu, Gerard de Melo, and Ahmed Elgammal. Time: Text and image mutual-translation adversarial networks. In Thirty-Fifth AAAI Conference on Artificial Intelligence, 2021.

Lars Mescheder, Andreas Geiger, and Sebastian Nowozin. Which training methods for gans do actually converge? In International Conference on Machine Learning, pp. 3481-3490. PMLR, 2018.

Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations, 2018.

Sangwoo Mo, Minsu Cho, and Jinwoo Shin. Freeze discriminator: A simple baseline for fine-tuning gans. arXiv preprint arXiv:2002.10964, 2020.

Mkhuseli Ngxande, Jules-Raymond Tapamo, and Michael Burke. Depthwisegans: Fast training generative adversarial networks for realistic image synthesis. In 2019 Southern African Universities Power Engineering Conference/Robotics and Mechatronics/Pattern Recognition Association of South Africa (SAUPEC/RobMech/PRASA), pp. 111-116. IEEE, 2019.

Maria-Elena Nilsback and Andrew Zisserman. A visual vocabulary for flower classification. In IEEE Conference on Computer Vision and Pattern Recognition, volume 2, pp. 1447-1454, 2006.

Atsuhiro Noguchi and Tatsuya Harada. Image generation from small datasets via batch statistics adaptation. In Proceedings of the IEEE International Conference on Computer Vision, pp. 2750-2758, 2019.

Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017.

Alec Radford, Luke Metz, and Soumith Chintala. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015.

Esther Robb, Wen-Sheng Chu, Abhishek Kumar, and Jia-Bin Huang. Few-shot adaptation of generative adversarial networks. arXiv preprint arXiv:2010.11943, 2020.

Zhangzhang Si and Song-Chun Zhu. Learning hybrid image templates (hit) by information projection. IEEE Transactions on Pattern Analysis and Machine Intelligence, 34(7):1354-1367, 2011.

Samarth Sinha, Han Zhang, Anirudh Goyal, Yoshua Bengio, Hugo Larochelle, and Augustus Odena. Small-gan: Speeding up gan training using core-sets. arXiv preprint arXiv:1910.13540, 2019.

Dustin Tran, Rajesh Ranganath, and David M. Blei. Deep and hierarchical implicit models. arXiv preprint arXiv:1702.08896, 7(3):13, 2017.

Ngoc-Trung Tran, Viet-Hung Tran, Bao-Ngoc Nguyen, Linxiao Yang, and Ngai-Man Cheung. Self-supervised gan: Analysis and improvement with multi-class minimax game. Advances in Neural Information Processing Systems, 32:13253-13264, 2019.

Dmitry Ulyanov, Andrea Vedaldi, and Victor Lempitsky. Instance normalization: The missing ingredient for fast stylization. arXiv preprint arXiv:1607.08022, 2016.

Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro. High-resolution image synthesis and semantic manipulation with conditional gans. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 8798-8807, 2018.

Yaxing Wang, Abel Gonzalez-Garcia, David Berga, Luis Herranz, Fahad Shahbaz Khan, and Joost van de Weijer. Minegan: Effective knowledge transfer from gans to target domains with few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9332-9341, 2020.

Yasin Yazici, Chuan-Sheng Foo, Stefan Winkler, Kim-Hui Yap, Georgios Piliouras, and Vijay Chandrasekhar. The unusual effectiveness of averaging in gan training. arXiv preprint arXiv:1806.04498, 2018.

Dan Zhang and Anna Khoreva. Pa-gan: Improving gan training by progressive augmentation. 2018.

Han Zhang, Tao Xu, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, and Dimitris N. Metaxas. Stackgan: Text to photo-realistic image synthesis with stacked generative adversarial networks. In Proceedings of the IEEE International Conference on Computer Vision, pp. 5907-5915, 2017.

Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 586-595, 2018.

Junbo Zhao, Michael Mathieu, and Yann LeCun. Energy-based generative adversarial network. arXiv preprint arXiv:1609.03126, 2016.

Shengyu Zhao, Zhijian Liu, Ji Lin, Jun-Yan Zhu, and Song Han. Differentiable augmentation for data-efficient gan training. arXiv preprint arXiv:2006.10738, 2020.

Jiachen Zhong, Xuanqing Liu, and Cho-Jui Hsieh. Improving the speed and quality of gan by adversarial training. arXiv preprint arXiv:2008.03364, 2020.

Jiapeng Zhu, Yujun Shen, Deli Zhao, and Bolei Zhou. In-domain gan inversion for real image editing. arXiv preprint arXiv:2004.00049, 2020.

Jun-Yan Zhu, Philipp Krahenbuhl, Eli Shechtman, and Alexei A. Efros. Generative visual manipulation on the natural image manifold. In European Conference on Computer Vision, pp. 597-613. Springer, 2016.

towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b802037b6ef10a7925cecb68e43d08a29a6e81f06be4e249e827bfd1b82d6d91
size 804719

towardsfasterandstabilizedgantrainingforhighfidelityfewshotimagesynthesis/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:89556a4643810628f2c3584685f5b930e6417b1fb9de4e2d6761c210459b7806
size 430306

towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90abf49c011370e4a83fc97b074ec730353eff29d482fb72aa6678395dcec1f9
size 125676

towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:97bec35497ed76933144d86bbd94e30ddd456a6d9a88f8e9666a844eb9cf2fa0
size 146465

towardsimpartialmultitasklearning/9998397f-fd2e-4bb6-9a26-ea785dd420a2_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:76b4f732269a7cb3e4b112092a18585e182de69c23df9d11156ad1864562beaa
size 41803668

towardsimpartialmultitasklearning/full.md
ADDED
@@ -0,0 +1,504 @@

# TOWARDS IMPARTIAL MULTI-TASK LEARNING

Liyang Liu$^{1}$, Yi Li$^{2}$, Zhanghui Kuang$^{2}$, Jing-Hao Xue$^{3}$, Yimin Chen$^{2}$, Wenming Yang$^{1,*}$, Qingmin Liao$^{1}$, Wayne Zhang$^{2,4}$

$^{1}$Shenzhen International Graduate School/Department of Electronic Engineering, Tsinghua University
$^{2}$SenseTime Research
$^{3}$Department of Statistical Science, University College London
$^{4}$Qing Yuan Research Institute, Shanghai Jiao Tong University

{liu-ly14@mails., yang.wenming@sz., liaoqm@}tsinghua.edu.cn
{liyi,kuangzhanghui,chenyimin,wayne.zhang}@sensetime.com
jinghao.xue@ucl.ac.uk

# ABSTRACT

Multi-task learning (MTL) has been widely used in representation learning. However, naively training all tasks simultaneously may lead to the partial training issue, where specific tasks are trained more adequately than others. In this paper, we propose to learn multiple tasks impartially. Specifically, for the task-shared parameters, we optimize the scaling factors via a closed-form solution, such that the aggregated gradient (the sum of raw gradients weighted by the scaling factors) has equal projections onto the individual tasks. For the task-specific parameters, we dynamically weigh the task losses so that all of them are kept at a comparable scale. Further, we find that the above gradient balance and loss balance are complementary, and thus propose a hybrid balance method to further improve the performance. Our impartial multi-task learning (IMTL) can be trained end-to-end without any heuristic hyper-parameter tuning, and is general enough to be applied to all kinds of losses without any distribution assumption. Moreover, our IMTL converges to similar results even when the task losses are designed to have different scales, and thus it is scale-invariant. We extensively evaluate our IMTL on the standard MTL benchmarks including Cityscapes, NYUv2 and CelebA. It outperforms existing loss weighting methods under the same experimental settings.

# 1 INTRODUCTION

Recent deep networks in computer vision can match or even surpass human beings on some specific tasks taken separately. In reality, however, multiple tasks (e.g., semantic segmentation and depth estimation) must be solved simultaneously. Multi-task learning (MTL) (Caruana, 1997; Evgeniou & Pontil, 2004; Ruder, 2017; Zhang & Yang, 2017) aims at sharing the learned representation among tasks (Zamir et al., 2018) so that they benefit from each other and achieve better results and stronger robustness (Zamir et al., 2020). However, sharing the representation can lead to a partial learning issue: some tasks are learned well while others are overlooked, due to the different loss scales or gradient magnitudes of the various tasks and the mutual competition among them. Several methods have been proposed to mitigate this issue, either via gradient balance, such as gradient magnitude normalization (Chen et al., 2018) and Pareto optimality (Sener & Koltun, 2018), or via loss balance, such as homoscedastic uncertainty (Kendall et al., 2018). Gradient balance can evenly learn the task-shared parameters but ignores the task-specific ones. Loss balance can prevent MTL from being biased in favor of tasks with large loss scales but cannot ensure impartial learning of the shared parameters. In this work, we find that gradient balance and loss balance are complementary, and combining the two can further improve the results. To this end, we propose impartial MTL (IMTL), which simultaneously balances gradients and losses across tasks.

For gradient balance, we propose IMTL-G(rad) to learn the scaling factors such that the aggregated gradient of the task-shared parameters has equal projections onto the raw gradients of the individual tasks (see Fig. 1 (d)). We show that the scaling-factor optimization problem is equivalent, in geometry, to finding the angle bisector of the gradients from all tasks, and derive a closed-form solution to it. In contrast with previous gradient balance methods such as GradNorm (Chen et al., 2018), MGDA (Sener & Koltun, 2018) and PCGrad (Yu et al., 2020), which have learning biases in favor of tasks with gradients close to the average gradient direction, tasks with small gradient magnitudes, and tasks with large gradient magnitudes, respectively (see Fig. 1 (a), (b) and (c)), in our IMTL-G the task-shared parameters can be updated without bias to any task.


Figure 1: Comparison of gradient balance methods: (a) GradNorm, (b) MGDA, (c) PCGrad, (d) IMTL-G. In (a) to (d), $g_{1}$, $g_{2}$ and $g_{3}$ represent the gradients computed from the raw loss of each task. The gray surface represents the plane spanned by these gradients. The red arrow denotes the aggregated gradient computed from the weighted sum loss, which is ultimately used to update the model parameters. The blue arrows show the projections of $g$ onto the raw gradients $\{g_{t}\}$. $g$ has the largest projection on $g_{2}$ (nearest to the mean direction), $g_{3}$ (smallest magnitude) and $g_{2}$ (largest magnitude) for GradNorm, MGDA and PCGrad, respectively, while the projections onto $\{g_{t}\}$ are equal in our IMTL-G.

For loss balance, we propose IMTL-L(oss) to automatically learn a loss weighting parameter for each task so that the weighted losses have comparable scales and the effect of different loss scales from various tasks can be canceled out. Compared with uncertainty weighting (Kendall et al., 2018), which is biased towards regression tasks over classification tasks, our IMTL-L treats all tasks equivalently without any bias. Besides, we model the loss balance problem from the optimization perspective, without the distribution assumptions required by (Kendall et al., 2018). Ours is therefore more general and can be used with any kind of loss. Moreover, the loss weighting parameters and the network parameters can be jointly learned in an end-to-end fashion in IMTL-L.

Further, we find the above two balances are complementary and can be combined to improve the performance. Specifically, we apply IMTL-G on the task-shared parameters and IMTL-L on the task-specific parameters, leading to the hybrid balance method IMTL. Our IMTL is scale-invariant: the model converges to similar results even when the same task is designed to have different loss scales, which is common in practice. For example, the cross-entropy loss in semantic segmentation has different scales when using "average" versus "sum" reduction over locations in the loss computation. We empirically validate that our IMTL is more robust against heavy loss scale changes than its competitors. Meanwhile, our IMTL only adds negligible computational overhead.

We extensively evaluate our proposed IMTL on standard benchmarks: Cityscapes, NYUv2 and CelebA, where the experimental results show that IMTL achieves superior performance under all settings. Besides, considering that a fair and practical benchmark for comparing MTL methods is lacking, we unify the experimental settings such as image resolution, data augmentation, network structure, learning rate and optimizer choice. We re-implement and compare the representative MTL methods in a unified framework, which will be publicly available. Our contributions are:

- We propose a novel closed-form gradient balance method, which learns task-shared parameters without any task bias; and we develop a general learnable loss balance method, where no distribution assumption is required and the scale parameters can be jointly trained with the network parameters.
- We unveil that gradient balance and loss balance are complementary and accordingly propose a hybrid balance method to simultaneously balance gradients and losses.
- We validate that our proposed IMTL is loss scale-invariant and more robust against loss scale changes than its competitors, and we give in-depth theoretical and experimental analyses of its connections to and differences from previous methods.
- We extensively verify the effectiveness of our IMTL. For fair comparisons, a unified code-base will also be made publicly available, where more practical settings are adopted and stronger performances are achieved compared with existing code-bases.

# 2 RELATED WORK

Recent advances in MTL mainly come from two directions: network structure improvements and loss weighting developments. Network-structure methods based on soft parameter-sharing usually lead to high inference cost (reviewed in Appendix A). Loss weighting methods find loss weights to be multiplied onto the raw losses for model optimization. They employ a hard parameter-sharing paradigm (Ruder, 2017), where several light-weight task-specific heads are attached upon a heavy-weight task-agnostic backbone. There are also efforts that learn to group tasks and branch the network in the middle layers (Guo et al., 2020; Standley et al., 2020), which try to achieve a better accuracy-efficiency trade-off and can be seen as semi-hard parameter-sharing. We believe task grouping and loss weighting are orthogonal and complementary directions for facilitating multi-task learning and can benefit from each other. In this work we focus on loss weighting methods, which are the most economical since almost all of the computation is shared across tasks, leading to high inference speed. Task Prioritization (Guo et al., 2018) weights task losses by their difficulties to focus on the harder tasks during training. Uncertainty weighting (Kendall et al., 2018) models the loss weights as data-agnostic, task-dependent homoscedastic uncertainty, and the loss weighting is then derived from maximum likelihood estimation. GradNorm (Chen et al., 2018) learns the loss weights so as to enforce the norms of the scaled gradients of all tasks to be close. MGDA (Sener & Koltun, 2018) casts multi-task learning as multi-objective optimization and finds the minimum-norm point in the convex hull composed by the gradients of the multiple tasks; Pareto optimality is supposed to be achieved under mild conditions. GLS (Chennupati et al., 2019) instead uses the geometric mean of the task-specific losses as the target loss; we will show that it effectively weights each loss by its reciprocal. PCGrad (Yu et al., 2020) avoids interference between tasks by projecting the gradient of one task onto the normal plane of the other. DSG (Lu et al., 2020) dynamically makes a task "stop or go" according to its convergence state, where a stopped task is updated only once in a while. Although many loss weighting methods have been proposed, they are seldom open-sourced and rarely compared thoroughly under practical settings where strong performances are achieved, which motivates us to give an in-depth analysis and a fair comparison of them.

# 3 IMPARTIAL MULTI-TASK LEARNING

In MTL, we map a sample $\pmb{x} \in \mathbb{X}$ to its labels $\{\pmb{y}_t \in \mathbb{Y}_t\}_{t \in [1,T]}$ for all $T$ tasks through multiple task-specific mappings $\{\pmb{f}_t : \mathbb{X} \to \mathbb{Y}_t\}$. In most loss weighting methods, the hard parameter-sharing paradigm is employed, such that $\pmb{f}_t$ is parameterized by heavy-weight task-shared parameters $\pmb{\theta}$ and light-weight task-specific parameters $\pmb{\theta}_t$. All tasks take the same shared intermediate feature $z = \pmb{f}(\pmb{x}; \pmb{\theta})$ as input, and the $t$-th task head outputs the prediction as $\pmb{f}_t(\pmb{x}) = \pmb{f}_t(\pmb{z}; \pmb{\theta}_t)$. We aim to find the scaling factors $\{\alpha_t\}$ for the $T$ task losses $\{L_t(\pmb{f}_t(\pmb{x}), \pmb{y}_t)\}$, so that the weighted sum loss $L = \sum_{t} \alpha_t L_t$ can be optimized to make all tasks perform well. This poses great challenges because: 1) the losses may have distinct forms, such as cross-entropy loss and cosine similarity; and 2) the dynamic ranges of the losses may differ by orders of magnitude. In this work, we propose a hybrid solution covering both the task-shared parameters $\pmb{\theta}$ and the task-specific parameters $\{\pmb{\theta}_t\}$, as illustrated in Fig. 2.

# 3.1 GRADIENT BALANCE: IMTL-G

For the task-shared parameters $\theta$, we receive $T$ gradients $\{\pmb{g}_t = \nabla_\theta L_t\}$ via back-propagation from the $T$ raw losses $\{L_t\}$, and these gradients represent the optimal update directions for the individual tasks. As the parameters $\theta$ can only be updated with a single gradient, we compute an aggregated gradient $\pmb{g}$ as a linear combination of $\{\pmb{g}_t\}$. This is equivalent to finding the scaling factors $\{\alpha_t\}$ of the raw losses $\{L_t\}$, since $\pmb{g} = \sum_{t} \alpha_t \pmb{g}_t = \nabla_\theta L = \nabla_\theta \left( \sum_{t} \alpha_t L_t \right)$. Motivated by the principle of balance among tasks, we propose to make the projections of $\pmb{g}$ onto $\{\pmb{g}_t\}$ equal, as in Fig. 1 (d).


Figure 2: Overview of IMTL.

Algorithm 1 Training by Impartial Multi-task Learning

Input: input sample $\pmb{x}$, task-specific labels $\{\pmb{y}_t\}$ and learning rate $\eta$
Output: task-shared/-specific parameters $\theta / \{\theta_t\}$, scale parameters $\{s_t\}$

1: compute task-shared feature $z = f(x; \theta)$
2: for $t = 1$ to $T$ do
3: compute task prediction by head network $\pmb{f}_t(\pmb{x}) = \pmb{f}_t^{\mathrm{net}}(\pmb{z}; \pmb{\theta}_t)$
4: compute raw loss by loss function $L_t^{\mathrm{raw}} = L_t^{\mathrm{func}}(\pmb{f}_t(\pmb{x}), \pmb{y}_t)$
5: compute scaled loss $L_t = b a^{s_t} L_t^{\mathrm{raw}} - s_t$ (default $a = e, b = 1$) $\triangleright$ loss balance
6: compute gradient of shared feature $\pmb{z}$: $\pmb{g}_t = \nabla_z L_t$
7: compute unit-norm gradient $\pmb{u}_t = \frac{\pmb{g}_t}{\|\pmb{g}_t\|}$
8: end for
9: compute gradient differences $D^{\top} = [g_1^{\top} - g_2^{\top}, \dots, g_1^{\top} - g_T^{\top}]$
10: compute unit-norm gradient differences $U^{\top} = [u_1^{\top} - u_2^{\top}, \dots, u_1^{\top} - u_T^{\top}]$
11: compute scaling factors for tasks 2 to $T$: $\alpha_{2:T} = g_1 U^{\top} (D U^{\top})^{-1}$ $\triangleright$ gradient balance
12: compute scaling factors for all tasks: $\alpha = [1 - \mathbf{1}\alpha_{2:T}^{\top}, \alpha_{2:T}]$
13: update task-shared parameters $\theta = \theta - \eta \nabla_{\theta}\left(\sum_{t} \alpha_t L_t\right)$
14: for $t = 1$ to $T$ do
15: update task-specific parameters $\theta_t = \theta_t - \eta \nabla_{\theta_t} L_t$
16: update loss scale parameter $s_t = s_t - \eta \frac{\partial L_t}{\partial s_t}$
17: end for

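To make Alg. 1 concrete, here is a minimal PyTorch sketch of one hybrid-balance training step. The setup objects (`backbone`, `heads`, `loss_fns`, the scale vector `s`, the optimizer `opt`) are assumptions supplied by the caller, and the code is an illustrative reading of the algorithm, not the authors' released implementation.

```python
import torch

def imtl_step(backbone, heads, loss_fns, s, x, ys, opt):
    """One hybrid-balance (IMTL) training step, sketched after Alg. 1.

    Assumptions: `backbone` maps x to the shared feature z; `heads`,
    `loss_fns`, `ys` are per-task lists; `s` is a learnable tensor of
    shape (T,); `opt` optimizes all parameters including `s`.
    """
    z = backbone(x)                                     # steps 1-4
    scaled_losses, grads = [], []
    for head, loss_fn, y, s_t in zip(heads, loss_fns, ys, s):
        raw = loss_fn(head(z), y)
        scaled_losses.append(torch.exp(s_t) * raw - s_t)   # IMTL-L, step 5
        # Feature-level gradient of the scaled loss (the -s_t term is
        # constant in z), used as a surrogate for the parameter-level
        # gradient (step 6).
        g = torch.autograd.grad(torch.exp(s_t) * raw, z, retain_graph=True)[0]
        grads.append(g.flatten())
    G = torch.stack(grads)                              # (T, dim)
    U = G / G.norm(dim=1, keepdim=True)                 # step 7
    Dm = G[0:1] - G[1:]                                 # step 9, (T-1, dim)
    Um = U[0:1] - U[1:]                                 # step 10
    a_rest = G[0:1] @ Um.t() @ torch.inverse(Dm @ Um.t())   # Eq. (2), step 11
    alpha = torch.cat([1 - a_rest.sum(1, keepdim=True), a_rest], 1).squeeze(0)
    # Simplification: Alg. 1 updates theta_t and s_t with the unweighted
    # scaled loss; weighting everything by alpha_t (done here for brevity)
    # rescales but does not move the IMTL-L fixed point e^{s_t} L_t = 1.
    total = sum(a * l for a, l in zip(alpha.detach(), scaled_losses))
    opt.zero_grad()
    total.backward()
    opt.step()
```
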
In this way, we treat all tasks equally so that they progress at the same speed and none is left behind. Formally, let $\{\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|\}$ denote the unit-norm versions of the gradients $\{\pmb{g}_t\}$, which are row vectors; then we have:

$$
\boldsymbol{g}\boldsymbol{u}_1^{\top} = \boldsymbol{g}\boldsymbol{u}_t^{\top} \Leftrightarrow \boldsymbol{g}\left(\boldsymbol{u}_1 - \boldsymbol{u}_t\right)^{\top} = 0, \quad \forall\, 2 \leqslant t \leqslant T. \tag{1}
$$

The above problem is under-determined, but we can obtain closed-form results for $\{\alpha_t\}$ by constraining $\sum_{t} \alpha_{t} = 1$. Let $\pmb{\alpha} = [\alpha_{2}, \dots, \alpha_{T}]$, $\pmb{U}^{\top} = [\pmb{u}_{1}^{\top} - \pmb{u}_{2}^{\top}, \dots, \pmb{u}_{1}^{\top} - \pmb{u}_{T}^{\top}]$, $\pmb{D}^{\top} = [\pmb{g}_{1}^{\top} - \pmb{g}_{2}^{\top}, \dots, \pmb{g}_{1}^{\top} - \pmb{g}_{T}^{\top}]$ and $\mathbf{1} = [1, \dots, 1]$; from Eq. (1) we obtain:

$$
\boldsymbol{\alpha} = \boldsymbol{g}_1 \boldsymbol{U}^{\top}\left(\boldsymbol{D}\boldsymbol{U}^{\top}\right)^{-1}. \quad (\text{IMTL-G}) \tag{2}
$$

The detailed derivation is in Appendix B.1. After obtaining $\alpha$, the scaling factor of the first task can be computed as $\alpha_{1} = 1 - \mathbf{1}\alpha^{\top}$, since $\sum_{t}\alpha_{t} = 1$. The optimized $\{\alpha_{t}\}$ are used to compute $L = \sum_{t}\alpha_{t}L_{t}$, which is ultimately minimized by SGD to update the model. Up to this point, back-propagation would need to be executed $T$ times to obtain the gradient of each task loss with respect to the heavy-weight task-shared parameters $\theta$, which is time-consuming and non-scalable. We therefore replace the parameter-level gradients $\{g_t = \nabla_\theta L_t\}$ with the feature-level gradients $\{\nabla_z L_t\}$ when computing $\{\alpha_{t}\}$. This amounts to achieving gradient balance with respect to the last shared feature $z$ as a surrogate for the task-shared parameters $\theta$, since the network can back-propagate this balance all the way through the task-shared backbone starting from $z$. This relaxation allows us to back-propagate through the backbone only once after obtaining $\{\alpha_{t}\}$, and thus the training time can be dramatically reduced.

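As a sanity check on Eq. (2), the small NumPy sketch below builds random task gradients, computes $\alpha$, and verifies that the aggregated gradient has equal projections onto every $\pmb{g}_t$; all names and values are illustrative, not from the paper.

```python
import numpy as np

rng = np.random.default_rng(0)
T, dim = 3, 8
G = rng.normal(size=(T, dim))                      # rows: raw task gradients g_t
Unit = G / np.linalg.norm(G, axis=1, keepdims=True)
D = G[0] - G[1:]                                   # gradient differences, (T-1, dim)
Udiff = Unit[0] - Unit[1:]                         # unit-norm differences, (T-1, dim)
alpha_rest = G[0] @ Udiff.T @ np.linalg.inv(D @ Udiff.T)   # Eq. (2)
alpha = np.concatenate([[1 - alpha_rest.sum()], alpha_rest])
g = alpha @ G                                      # aggregated gradient
print(g @ Unit.T)                                  # equal projections onto every g_t
```
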
# 3.2 LOSS BALANCE: IMTL-L

For the task-specific parameters $\{\theta_t\}$, we cannot employ the IMTL-G described above, because $\nabla_{\theta_t}L_\tau = 0$ for all $t \neq \tau$, and thus only the gradient of the corresponding task, $\nabla_{\theta_t}L_t$, is available for each $\theta_t$. Instead, we propose to balance the losses among tasks by forcing the scaled losses $\{\alpha_t L_t\}$ to be a constant for all tasks; without loss of generality, we take the constant to be 1. The most direct idea is to compute the scaling factors as $\{\alpha_t = 1 / L_t\}$, but these are sensitive to outlier samples and exhibit severe oscillations, so we instead propose to learn the loss scales via gradient descent, which achieves stronger stability. Suppose the positive losses $\{L_t > 0\}$ are to be balanced. We first introduce a mapping function $h: \mathbb{R} \to \mathbb{R}^+$ to transform the arbitrarily-ranged learnable scale parameters $\{s_t\}$ into positive scaling factors $\{h(s_t) > 0\}$; hereafter we drop the subscript $t$ for brevity. We should then construct an appropriate scaled loss $g(s)$ so that both the network parameters $\theta$ and the scale parameter $s$ can be optimized by minimizing $g(s)$. On one hand, we balance different tasks by encouraging the scaled losses $h(s)L(\pmb{\theta})$ to be 1 for all tasks, so the optimum $s^{\star}$ of $s$ is achieved when $h(s)L(\pmb{\theta}) = 1$, or equivalently:

$$
f(s) \equiv h(s)L(\boldsymbol{\theta}) - 1 = 0, \quad \text{if } s = s^{\star}. \tag{3}
$$

One may expect to minimize $|f(s)| = |h(s)L(\pmb{\theta}) - 1|$ to find $s^{\star}$; however, when $h(s)L(\pmb{\theta}) < 1$, the gradient with respect to $\pmb{\theta}$, $\nabla_{\pmb{\theta}}|f(s)| = -h(s)\nabla_{\pmb{\theta}}L(\pmb{\theta})$, points in the opposite direction. On the other hand, assume our scaled loss $g(s)$ is a differentiable convex function of $s$; then its minimum is achieved if and only if $s = s^{\star}$, where the derivative of $g(s)$ is zero:

$$
g'(s) = 0, \quad \text{if } s = s^{\star}. \tag{4}
$$

From Eq. (3) and (4) we find that both $f(s)$ and $g'(s)$ are 0 when $s = s^{\star}$; we can therefore regard $f(s)$ as the derivative of $g(s)$, which is our target scaled loss, used to optimize both the network parameters $\theta$ and the loss scale parameter $s$. Then we have:

$$
g'(s) = f(s) \Leftrightarrow g(s) = \int f(s)\, \mathrm{d}s = L(\theta)\int h(s)\, \mathrm{d}s - s. \tag{5}
$$

From Eq. (3) and (5), we notice that both $h(s)$ and $\int h(s) \, \mathrm{d}s$ denote loss scales, so we have $\int h(s) \, \mathrm{d}s = Ch(s)$, where $C > 0$ is a constant. By this ordinary differential equation, $\int h(s) \, \mathrm{d}s$ must be an exponential function: $\int h(s) \, \mathrm{d}s = ba^s$ with $a > 1, b > 0$ (see Appendix B.2). We then have $g''(s) = ka^s$, $k > 0$, which is always positive and verifies our assumption about the convexity of $g(s)$. Also note that the gradient of $g(s)$ with respect to $\pmb{\theta}$, $\nabla_{\pmb{\theta}}g(s) = \int h(s) \, \mathrm{d}s\,\nabla_{\pmb{\theta}}L(\pmb{\theta}) = ba^s\nabla_{\pmb{\theta}}L(\pmb{\theta})$, points in the appropriate direction since $ba^s > 0$. As an instantiation, we set $\int h(s) \, \mathrm{d}s = e^s$ $(a = e, b = 1)$; then

$$
g(s) = e^{s}L(\boldsymbol{\theta}) - s. \quad (\text{IMTL-L}) \tag{6}
$$

From Eq. (6) we see that the raw loss is scaled by $e^s$, and $-s$ acts as a regularizer that avoids the trivial solution $s = -\infty$ while the scaled loss $g(s)$ is minimized. In implementation, the task losses $\{L_t\}$ are scaled by $\{e^{s_t}\}$, and the scaled losses $\{e^{s_t}L_t - s_t\}$ are used to update the network parameters $\theta$, $\{\theta_t\}$ and the scale parameters $\{s_t\}$.

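A minimal PyTorch sketch of Eq. (6), assuming the per-task raw losses have already been computed; the variable names and raw loss values are illustrative only.

```python
import torch

# Learnable log-scale parameters, one per task (T tasks), initialized to 0.
T = 3
s = torch.zeros(T, requires_grad=True)

def imtl_l(raw_losses, s):
    """Scale each raw loss as e^{s_t} * L_t - s_t, per Eq. (6).

    At the optimum of s_t, e^{s_t} * L_t = 1, so all scaled losses sit
    at a comparable scale regardless of the raw loss magnitudes.
    """
    return torch.stack([torch.exp(s_t) * L_t - s_t
                        for s_t, L_t in zip(s, raw_losses)])

# Example: raw losses of very different scales are pulled to the same level
# once s is trained jointly with the network parameters (e.g., via SGD/Adam).
raw = [torch.tensor(250.0), torch.tensor(0.7), torch.tensor(0.003)]
print(imtl_l(raw, s).sum())   # the quantity that would be minimized
```
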
# 3.3 HYBRID BALANCE: IMTL

We have introduced IMTL-G and IMTL-L to achieve gradient and loss balance, respectively, and both of them produce scaling factors to be applied on the raw losses. They can be used separately, but we find them complementary and able to be combined to improve the performance. In IMTL-G, even if the raw losses are multiplied by arbitrary (possibly different across tasks) positive factors, the direction of the aggregated gradient $\pmb{g}$ stays unchanged, because by definition $\pmb{g} = \sum_{t} \alpha_{t} \pmb{g}_{t}$ is the angular bisector of the gradients $\{\pmb{g}_{t}\}$, and positive scaling does not change the directions of $\{\pmb{g}_{t}\}$ and thus that of $\pmb{g}$ (proof in Theorem 2). So we can also obtain the scaling factors $\{\alpha_{t}\}$ in IMTL-G from the losses that have been scaled by $\{s_{t}\}$ from IMTL-L. IMTL-G and IMTL-L are combined as follows: 1) the task-specific parameters $\{\theta_{t}\}$ and scale parameters $\{s_{t}\}$ are updated by the scaled losses $\{e^{s_{t}} L_{t} - s_{t}\}$; 2) the task-shared parameters $\pmb{\theta}$ are updated by $\sum_{t} \alpha_{t} (e^{s_{t}} L_{t})$, the weighted average of $\{e^{s_{t}} L_{t}\}$, with the weights $\{\alpha_{t}\}$ computed from $\{\nabla_{z} (e^{s_{t}} L_{t})\}$ using IMTL-G. Note that the regularization terms $\{-s_{t}\}$ in Eq. (6) are constants with respect to $\pmb{\theta}$ and $z$, and can thus be ignored when computing gradients and updating parameters in IMTL-G. In this way, we achieve both gradient balance for the task-shared parameters and loss balance for the task-specific parameters, leading to our full IMTL as illustrated in Alg. 1.

# 4 DISCUSSION

We draw connections between our method and previous state-of-the-art methods in Fig. 3. We will show that previous methods can all be categorized as gradient or loss balance, and thus each of them can be seen as a specification of our method. However, all of them have intrinsic biases or shortcomings leading to inferior performance, which we try to overcome.


Figure 3: Relationship between our IMTL and previous methods. The blue dashed arrow indicates the characteristic of each method. For the loss balance methods, we annotate the scaled loss in brackets: $L_{\mathrm{cls}}$, $L_{\mathrm{reg}}$ and $L_{t}$ are the raw losses of classification, regression and an individual task, respectively, and $\alpha_{\mathrm{cls}}$, $\alpha_{\mathrm{reg}}$ and $\alpha_{t}$ are the corresponding loss scales; $L$ is the geometric mean loss and $T$ is the number of tasks. For the gradient balance methods, we annotate in brackets the projection of the aggregated gradient $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ onto the raw gradient $\pmb{g}_{t}$ of the $t$-th task, where $\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|$ is the unit-norm vector, $p_t = \pmb{g}\pmb{u}_t^\top$ is the projection of $\pmb{g}$ onto $\pmb{g}_t$, and $\pmb{u}_s = \sum_t\pmb{u}_t$ is the mean direction.

GradNorm (Chen et al., 2018) balances tasks by making the norms of the scaled gradients of all tasks approximately equal. It also introduces the inverse training rate and a hyper-parameter $\gamma$ to control the strength of approaching the mean gradient norm, such that tasks which learn more slowly receive larger gradient magnitudes. However, it does not take the relationship of the gradient directions into account. We show that when the angle between the gradients of each pair of tasks is identical, our IMTL-G leads to a solution equivalent to GradNorm's.

Theorem 1. If the angle between any pair of $\mathbf{u}_t, \mathbf{u}_{\tau}$ stays constant: $\mathbf{u}_t \mathbf{u}_{\tau}^\top = C_1$, $\forall t \neq \tau$ with $C_1 < 1$, then our IMTL-G leads to the same solution as that of GradNorm: $\mathbf{g} \mathbf{u}_t^\top = C_2 \Leftrightarrow n_t \equiv \| \alpha_t \mathbf{g}_t \| = \alpha_t \| \mathbf{g}_t \| = C_3$. In the above, $\mathbf{u}_t = \mathbf{g}_t / \| \mathbf{g}_t \|$, and $C_1, C_2$ and $C_3$ are constants.

Proof in Appendix C.1. In GradNorm, without the above constant-angle condition $\boldsymbol{u}_t\boldsymbol{u}_{\tau}^\top = C_1$, the projection of the aggregated gradient onto a task-specific gradient, $\boldsymbol{g}\boldsymbol{u}_t^\top = \left(\sum_{\tau}C_3\boldsymbol{u}_{\tau}\right)\boldsymbol{u}_t^\top = C_3\left(\sum_{\tau}\boldsymbol{u}_{\tau}\right)\boldsymbol{u}_t^\top$, is proportional to $\left(\sum_{\tau}\boldsymbol{u}_{\tau}\right)\boldsymbol{u}_t^\top$. GradNorm thus tends to optimize the "majority tasks" whose gradient directions are closer to the mean direction $\sum_{t}\boldsymbol{u}_{t}$, resulting in an undesired task bias.

MGDA (Sener & Koltun, 2018) finds the weighted average gradient $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ with minimum norm in the convex hull composed by $\{\pmb{g}_t\}$, so that $\sum_{t}\alpha_{t} = 1$ and $\alpha_{t}\geqslant 0$, $\forall t$. It adopts an iterative method based on the Frank-Wolfe algorithm to solve the multi-objective optimization problem. We note that the minimum-norm point has a closed-form representation without the constraints $\{\alpha_t\geqslant 0\}$. In this case, we minimize $\pmb{g}\pmb{g}^{\top} = (\sum_{t}\alpha_{t}\pmb{g}_{t})(\sum_{\tau}\alpha_{\tau}\pmb{g}_{\tau})^{\top}$ such that $\sum_{t}\alpha_{t} = 1$. It implies that $\pmb{g}$ is perpendicular to the hyper-plane spanned by $\{\pmb{g}_t\}$, as illustrated in Fig. 1 (b), and thus we have:

$$
\boldsymbol{g} \perp \left(\boldsymbol{g}_1 - \boldsymbol{g}_t\right) \Leftrightarrow \boldsymbol{g}\left(\boldsymbol{g}_1 - \boldsymbol{g}_t\right)^{\top} = 0, \quad \forall\, 2 \leqslant t \leqslant T, \tag{7}
$$

and we can obtain $\alpha = g_{1}D^{\top}\left(DD^{\top}\right)^{-1}$ (see Appendix C.2). From Eq. (7), we note that the aggregated gradient satisfies $\pmb{g}\pmb{g}_{t}^{\top} = C$. The projection of $\pmb{g}$ onto $\pmb{g}_{t}$, $\pmb{g}\pmb{u}_{t}^{\top} = C / \| \pmb{g}_{t}\|$, is therefore inversely proportional to the norm of $\pmb{g}_{t}$, so MGDA focuses on tasks with smaller gradient magnitudes, which breaks the task balance. Even with $\{\alpha_t\geqslant 0\}$, the problem persists (see Appendix C.2) in the original MGDA method. Through experiments, we note that finding the minimum-norm point without the constraints $\{\alpha_t\geqslant 0\}$ leads to similar performance as MGDA with the constraints. In our IMTL-G, although we do not constrain $\{\alpha_t\geqslant 0\}$, the loss weighting scales are always positive during training, as shown in Fig. 4.

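The unconstrained MGDA closed form can be checked numerically the same way as Eq. (2); in the illustrative sketch below, the projections $\pmb{g}\pmb{u}_t^{\top}$ come out inversely proportional to $\|\pmb{g}_t\|$ rather than equal (all names and values are ours, for exposition only).

```python
import numpy as np

rng = np.random.default_rng(0)
G = rng.normal(size=(3, 8))                        # rows: raw task gradients g_t
Unit = G / np.linalg.norm(G, axis=1, keepdims=True)
D = G[0] - G[1:]                                   # gradient differences
# Unconstrained MGDA: alpha = g_1 D^T (D D^T)^{-1} (Appendix C.2).
alpha_rest = G[0] @ D.T @ np.linalg.inv(D @ D.T)
alpha = np.concatenate([[1 - alpha_rest.sum()], alpha_rest])
g = alpha @ G                                      # minimum-norm aggregated gradient
print(g @ Unit.T)                                  # projections ~ 1 / ||g_t||
print((g @ Unit.T) * np.linalg.norm(G, axis=1))    # constant: g g_t^T = C
```
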
Uncertainty weighting (Kendall et al., 2018) regards the task uncertainty as the loss weight. For regression, it derives the $L_{1}$ loss from the Laplace distribution: $-\log p(y|f(\boldsymbol{x})) = |y - f(\boldsymbol{x})| / b + \log b$, where $\boldsymbol{x}$ is the data sample, $y$ is the ground-truth label, $f$ denotes the prediction model and $b$ is the diversity of the Laplace distribution. The $L_{2}$ loss case can be found in Appendix C.4. For classification, it takes the cross-entropy loss as a scaled categorical distribution and introduces the following approximation:

$$
-\log p(y \mid f(\boldsymbol{x})) = -\log\left[\operatorname{softmax}_y\left(\frac{f(\boldsymbol{x})}{\sigma^2}\right)\right] \approx -\frac{1}{\sigma^2}\log\left[\operatorname{softmax}_y(f(\boldsymbol{x}))\right] + \log\sigma, \tag{8}
$$

in which $\mathrm{softmax}_y(\cdot)$ stands for taking the $y$-th entry after the $\mathrm{softmax}(\cdot)$ operator. MTL corresponds to maximizing the joint likelihood of multiple targets, and the derivations yield the scale parameters $b$ and $\sigma$ for the regression and classification losses, respectively. (Kendall et al., 2018) learn $b$ and $\sigma$ as model parameters updated by stochastic gradient descent. However, this is applicable only if we can find an appropriate correspondence between the loss and a distribution. It is difficult to use for losses such as cosine similarity, and it is impossible to traverse all kinds of losses to obtain a unified form for them. Moreover, it sacrifices classification tasks. From Eq. (8) we find that the scaled cross-entropy loss is approximated as $L = e^{2s}L_{\mathrm{cls}} - s$ if we set $s = -\log \sigma$. Taking the derivative gives $\partial L / \partial s = 2e^{2s}L_{\mathrm{cls}} - 1$, so $s$ is optimized to drive the scaled loss $e^{2s}L_{\mathrm{cls}}$ towards $1 / 2$. In contrast, the scaled $L_{1}$ loss is approximated as $L = e^{s}L_{\mathrm{reg}} - s$ if we set $s = -\log b$, and taking the derivative gives $\partial L / \partial s = e^{s}L_{\mathrm{reg}} - 1$, so $s$ is optimized to drive the scaled $L_{1}$ loss towards 1, twice the classification value; the classification task is thus overlooked.

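These two fixed points are easy to verify numerically; a throwaway check with arbitrary positive raw losses (illustrative values only):

```python
import numpy as np
from scipy.optimize import brentq

L_cls, L_reg = 2.0, 5.0   # arbitrary positive raw losses
# Solve dL/ds = 0 for each scaled-loss form.
s_cls = brentq(lambda s: 2 * np.exp(2 * s) * L_cls - 1, -20, 20)
s_reg = brentq(lambda s: np.exp(s) * L_reg - 1, -20, 20)
print(np.exp(2 * s_cls) * L_cls)   # -> 0.5 (classification settles at 1/2)
print(np.exp(s_reg) * L_reg)       # -> 1.0 (regression settles at 1)
```
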
We would like to remark on the differences between our IMTL-L and uncertainty weighting (Kendall et al., 2018). Firstly, our derivation is motivated by fairness among tasks, which intrinsically differs from uncertainty weighting, which is based on task uncertainty and considers each task independently. Secondly, IMTL-L learns to balance among tasks without any biases, while uncertainty weighting may sacrifice classification tasks to favor regression tasks, as derived above. Thirdly, IMTL-L does not depend on any distribution assumptions and thus applies generally to various losses, including cosine similarity, which uncertainty weighting may have difficulty with; as far as we know, there is no appropriate correspondence between cosine similarity and a specific distribution. Lastly, uncertainty weighting needs to deal with different losses case by case and introduces approximations to derive scaling factors for certain losses (such as the cross-entropy loss), which may not be optimal, whereas our IMTL-L has a unified form for all kinds of losses.

GLS (Chennupati et al., 2019) calculates the target loss as the geometric mean $L = \left( \prod_{t} L_{t} \right)^{\frac{1}{T}}$; the gradient of $L$ with respect to the model parameters $\theta$ can then be obtained as in Appendix C.5, which can be regarded as weighting each loss by its reciprocal. However, as the gradient depends on the value of $L$, it is not invariant to loss scale changes. Moreover, we find it unstable when the number of tasks is large, because of the geometric mean computation.

# 5 EXPERIMENTS

Previous methods have adopted various experimental settings, but there are no extensive comparisons among them. As one contribution of our work, we re-implement representative methods and present fair comparisons among them in a unified code-base, where more practical settings are adopted and stronger performances are achieved compared with existing code-bases. The implementations exactly follow the original papers and open-sourced code to ensure correctness. We run experiments on the Cityscapes (Cordts et al., 2016), NYUv2 (Silberman et al., 2012) and CelebA (Liu et al., 2015) datasets to extensively analyze the different methods. Details can be found in Appendix D.

Results on Cityscapes. From Tab. 1 we can draw several informative conclusions. The uniform scaling baseline, which naively adds all losses, tends to optimize tasks with larger losses and gradient magnitudes, resulting in severe task bias. Uncertainty weighting (Kendall et al., 2018) sacrifices classification tasks to aid regression ones, leading to significantly worse results on semantic segmentation compared with our IMTL-L. GradNorm (Chen et al., 2018) is very sensitive to the choice of the hyper-parameter $\gamma$ controlling the strength of equalizing the gradient magnitudes: the default $\gamma = 1.5$ works well on NYUv2 but performs badly on Cityscapes. We find its best option is $\gamma = 0$, which makes the scaled gradient norms exactly equal. MGDA (Sener & Koltun, 2018) focuses on tasks with smaller gradient magnitudes, so semantic segmentation performs well but the other two tasks have difficulty converging. In addition, we find that our proposed closed-form variant without the hard constraints $\{\alpha_{t} \geqslant 0\}$ achieves similar results to the original iterative method; through the experiments we notice the closed-form solution almost always yields $\{\alpha_{t} \geqslant 0\}$. As for PCGrad (Yu et al., 2020), it yields only slightly better performance than uniform scaling, because its conflict projection has no effect when the angles between the gradients are equal to or less than $\pi/2$. In contrast, our IMTL method, in terms of both gradient balance and loss balance, yields competitive performance and achieves the best balance among tasks. Moreover, we verify that the two balances are complementary and can be combined to further improve the performance, with visualizations in Appendix E. Surprisingly, we find our IMTL can beat the single-task baseline, where each task is trained with a separate model: training multiple tasks simultaneously learns a better representation from multiple levels of semantics, which can in turn improve the individual tasks.

Table 1: Comparison between IMTL and previous methods on Cityscapes; semantic segmentation, instance segmentation and disparity/depth estimation are considered. The first group of columns shows the regular results of the different methods. The second group shows the results obtained by manually multiplying the semantic segmentation loss by 10 before applying these methods; the subscript numbers show the absolute change after scaling the loss, to demonstrate the robustness of the various methods. The arrows indicate whether higher ($\uparrow$) or lower ($\downarrow$) values are better. The best and runner-up results for each task are bold and underlined, respectively.

<table><tr><td>method</td><td>sem. mIoU↑</td><td>ins. L1↓</td><td>disp. L1↓</td><td>sem. mIoU↑ |Δ|↓</td><td>ins. L1↓ |Δ|↓</td><td>disp. L1↓ |Δ|↓</td><td>time s/iter↓</td></tr><tr><td>baselines</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>single-task</td><td>76.67</td><td>21.61</td><td>4.182</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>uniform scaling</td><td>58.99</td><td>18.13</td><td>3.512</td><td>-</td><td>-</td><td>-</td><td>1.201</td></tr><tr><td>loss balance</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>uncertainty (Kendall et al., 2018)</td><td>74.91</td><td>16.43</td><td>2.895</td><td>74.00<sub>0.91</sub></td><td>16.77<sub>0.34</sub></td><td>2.930<sub>0.035</sub></td><td>1.204</td></tr><tr><td>GLS (Chennupati et al., 2019)</td><td>75.65</td><td>17.18</td><td>2.953</td><td>66.22<sub>9.43</sub></td><td>21.09<sub>3.91</sub></td><td>3.358<sub>0.405</sub></td><td>1.202</td></tr><tr><td>IMTL-L</td><td>76.89</td><td>16.69</td><td>2.944</td><td>75.55<sub>1.34</sub></td><td>17.49<sub>0.80</sub></td><td>2.972<sub>0.028</sub></td><td>1.202</td></tr><tr><td>gradient balance</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>GradNorm (γ = 0)</td><td>76.27</td><td>17.99</td><td>3.195</td><td>72.96<sub>3.31</sub></td><td>19.36<sub>1.37</sub></td><td>3.216<sub>0.021</sub></td><td>1.741</td></tr><tr><td>GradNorm (Chen et al., 2018)</td><td>52.17</td><td>19.88</td><td>4.098</td><td>54.23<sub>2.06</sub></td><td>20.53<sub>0.65</sub></td><td>4.108<sub>0.010</sub></td><td>1.742</td></tr><tr><td>MGDA (w/o {αt ≥ 0})</td><td>76.95</td><td>53.19</td><td>6.296</td><td>76.36<sub>0.59</sub></td><td>29.06<sub>24.13</sub></td><td>3.377<sub>2.919</sub></td><td>1.777</td></tr><tr><td>MGDA (Sener & Koltun, 2018)</td><td>76.56</td><td>53.14</td><td>6.644</td><td>72.35<sub>4.21</sub></td><td>29.38<sub>23.76</sub></td><td>3.336<sub>3.308</sub></td><td>1.732</td></tr><tr><td>PCGrad (Yu et al., 2020)</td><td>60.50</td><td>17.99</td><td>3.450</td><td>66.33<sub>5.83</sub></td><td>17.99<sub>0.00</sub></td><td>3.386<sub>0.064</sub></td><td>2.087</td></tr><tr><td>IMTL-G (exact)</td><td>76.13</td><td>17.46</td><td>2.979</td><td>-</td><td>-</td><td>-</td><td>2.769</td></tr><tr><td>IMTL-G</td><td>76.52</td><td>16.61</td><td>2.997</td><td>76.06<sub>0.46</sub></td><td>17.52<sub>0.91</sub></td><td>3.020<sub>0.023</sub></td><td>1.776</td></tr><tr><td>hybrid balance</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>IMTL</td><td>77.00</td><td>15.96</td><td>2.905</td><td>76.56<sub>0.44</sub></td><td>15.85<sub>0.11</sub></td><td>2.938<sub>0.033</sub></td><td>1.795</td></tr></table>
each task is trained with a separate model. Training multiple tasks simultaneously can learn a better representation from multiple levels of semantics, which can in turn improve individual tasks.
In addition, we present the real-world training time per iteration for different methods in Tab. 1. As shown, loss balance methods are the most efficient, and our gradient balance method IMTL-G adds acceptable computational overhead, similar to that of GradNorm (Chen et al., 2018) and MGDA (Sener & Koltun, 2018). This efficiency comes from computing gradients with respect to the shared feature maps instead of the shared model parameters (the row "IMTL-G (exact)"), which brings similar performance but significant complexity due to multiple ($T$) backward passes through the shared parameters. Our IMTL-G only needs to back-propagate through the shared parameters once after obtaining the loss weights via Eq. (2), where the computational overhead mainly comes from the matrix multiplication rather than the matrix inverse, since the inverted matrix $DU^{\top} \in \mathbb{R}^{(T-1) \times (T-1)}$ is small compared with the dimension of the shared feature $z$.
As we outperform MGDA (Sener & Koltun, 2018) and PCGrad (Yu et al., 2020) significantly in terms of the objective metrics in Tab. 1, we further compare the qualitative results of our hybrid balance IMTL with the loss balance method uncertainty weighting (Kendall et al., 2018) and the gradient balance method GradNorm (Chen et al., 2018), given their strong performances (see Fig. 6). For depth estimation we only show predictions at pixels where ground truth (GT) labels exist to allow comparison with GT, which differs from Fig. 7 where depth predictions are shown for all pixels. Consistent with the results in Tab. 1, our IMTL shows visually noticeable improvements, especially for the semantic and instance segmentation tasks. It is worth noting that we conduct experiments under strong baselines and practical settings that have seldom been explored before; in this setting, changing the backbone of PSPNet (Zhao et al., 2017) from ResNet-50 to ResNet-101 only improves the mIoU of the semantic segmentation task by around $0.5\%$ according to the public code base<sup>2</sup>.
Scale invariance. We are also interested in scale invariance, i.e., how the results change with the loss scale. For example, in semantic segmentation, the loss scale changes if we replace the reduction method "mean" (averaged over all locations) with "sum" (summed over all locations) in the cross-entropy loss computation, or if the number of classes of interest increases. Scale invariance is beneficial for model robustness. To simulate this effect, we manually multiply the semantic segmentation loss by 10 and apply the same methods to see how the performances are affected. In the last three columns of Tab. 1 we report the absolute changes resulting from the
Table 2: Experimental results on the NYUv2 and CelebA datasets; semantic segmentation, surface normal estimation, depth estimation and multi-class classification are considered. Arrows indicate whether values are the higher the better $(\uparrow)$ or the lower the better $(\downarrow)$. The best and runner-up results in each column are bold and underlined, respectively.
<table><tr><td>method</td><td>NYUv2 sem. mIoU↑</td><td>NYUv2 norm. cos↑</td><td>NYUv2 depth L1↓</td><td>CelebA class. acc.↑</td></tr><tr><td>baselines</td><td></td><td></td><td></td><td></td></tr><tr><td>single-task</td><td>56.82</td><td>0.8827</td><td>0.5097</td><td>-</td></tr><tr><td>uniform scaling</td><td>57.40</td><td>0.8684</td><td>0.4248</td><td>90.01</td></tr><tr><td>loss balance</td><td></td><td></td><td></td><td></td></tr><tr><td>uncertainty (Kendall et al., 2018)</td><td>57.20</td><td>-</td><td>0.4400</td><td>90.34</td></tr><tr><td>GLS (Chennupati et al., 2019)</td><td>57.84</td><td>0.8762</td><td>0.4243</td><td>-</td></tr><tr><td>IMTL-L</td><td>58.36</td><td>0.8864</td><td>0.4173</td><td>90.54</td></tr><tr><td>gradient balance</td><td></td><td></td><td></td><td></td></tr><tr><td>GradNorm (γ = 0)</td><td>55.96</td><td>0.8818</td><td>0.4317</td><td>90.91</td></tr><tr><td>GradNorm (Chen et al., 2018)</td><td>56.92</td><td>0.8787</td><td>0.4285</td><td>89.92</td></tr><tr><td>MGDA (w/o {αt ≥ 0})</td><td>49.43</td><td>0.8877</td><td>0.4839</td><td>89.68</td></tr><tr><td>MGDA (Sener & Koltun, 2018)</td><td>49.44</td><td>0.8875</td><td>0.4759</td><td>90.04</td></tr><tr><td>PCGrad (Yu et al., 2020)</td><td>57.48</td><td>0.8696</td><td>0.4253</td><td>89.99</td></tr><tr><td>IMTL-G</td><td>57.00</td><td>0.8785</td><td>0.4226</td><td>91.03</td></tr><tr><td>hybrid balance</td><td></td><td></td><td></td><td></td></tr><tr><td>IMTL</td><td>58.85</td><td>0.8888</td><td>0.4215</td><td>91.12</td></tr></table>
multiplier. Our IMTL achieves the smallest performance fluctuations and thus the best invariance, while other methods are more or less affected by the loss scale change.
Results on NYUv2. In Tab. 2 we find similar patterns as on Cityscapes, but NYUv2 is a rather small dataset, so uniform scaling can also obtain reasonable results. Note that uncertainty weighting (Kendall et al., 2018) cannot be directly used for surface normal estimation when cosine similarity is used as the loss, since no appropriate probability distribution corresponds to cosine similarity. In this setting, surface normal estimation has the smallest gradient magnitude, so MGDA (Sener & Koltun, 2018) learns it best but performs worse on the other two tasks. Again, our IMTL performs best, taking advantage of the complementary gradient and loss balances.
Results on CelebA. To compare different methods in the many-task setting, in Tab. 2 we also conduct multi-label classification experiments on the CelebA (Liu et al., 2015) dataset. The mean accuracy over the 40 tasks is used as the final metric. Our IMTL outperforms its competitors in this large-task-number scenario, showing its superiority. Note that in this setting, GLS (Chennupati et al., 2019) has difficulty converging and no reasonable results can be obtained.
# 6 CONCLUSION
We propose an impartial multi-task learning method integrating gradient balance and loss balance, which are applied to the task-shared and task-specific parameters, respectively. Through in-depth analysis, we have theoretically compared our method with previous state-of-the-art methods. We have also shown that those methods can all be categorized as gradient or loss balance, but lead to specific biases among tasks. Through extensive experiments we verify our analysis and demonstrate the effectiveness of our method. Besides, for fair comparisons, we contribute a unified code base, which adopts more practical settings and delivers stronger performances than existing code bases; it will be publicly available for future research.
# ACKNOWLEDGEMENTS
This work was supported by the Natural Science Foundation of Guangdong Province (No. 2020A1515010711), the Special Foundation for the Development of Strategic Emerging Industries of Shenzhen (No. JCYJ20200109143010272), and the Innovation and Technology Commission of the Hong Kong Special Administrative Region, China (Enterprise Support Scheme under the Innovation and Technology Fund B/E030/18).
# REFERENCES
Rich Caruana. Multitask learning. Machine Learning, 28(1):41-75, 1997.

Liang-Chieh Chen, George Papandreou, Iasonas Kokkinos, Kevin Murphy, and Alan L Yuille. Deeplab: Semantic image segmentation with deep convolutional nets, atrous convolution, and fully connected crfs. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(4):834-848, 2017.

Zhao Chen, Vijay Badrinarayanan, Chen-Yu Lee, and Andrew Rabinovich. Gradnorm: Gradient normalization for adaptive loss balancing in deep multitask networks. In International Conference on Machine Learning, pp. 794-803, 2018.

Sumanth Chennupati, Ganesh Sistu, Senthil Yogamani, and Samir A Rawashdeh. Multinet++: Multi-stream feature aggregation and geometric loss strategy for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, 2019.

Marius Cordts, Mohamed Omran, Sebastian Ramos, Timo Rehfeld, Markus Enzweiler, Rodrigo Benenson, Uwe Franke, Stefan Roth, and Bernt Schiele. The cityscapes dataset for semantic urban scene understanding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3213-3223, 2016.

Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pp. 248-255. IEEE, 2009.

Theodoros Evgeniou and Massimiliano Pontil. Regularized multi-task learning. In Proceedings of the Tenth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pp. 109-117, 2004.

Yuan Gao, Jiayi Ma, Mingbo Zhao, Wei Liu, and Alan L Yuille. Nddr-cnn: Layerwise feature fusing in multi-task cnns by neural discriminative dimensionality reduction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3205-3214, 2019.

Yuan Gao, Haoping Bai, Zequn Jie, Jiayi Ma, Kui Jia, and Wei Liu. Mtl-nas: Task-agnostic neural architecture search towards general-purpose multi-task learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11543-11552, 2020.

Michelle Guo, Albert Haque, De-An Huang, Serena Yeung, and Li Fei-Fei. Dynamic task prioritization for multitask learning. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 270-287, 2018.

Pengsheng Guo, Chen-Yu Lee, and Daniel Ulbricht. Learning to branch for multi-task learning. In International Conference on Machine Learning, 2020.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 770-778, 2016.

Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132-7141, 2018.

Sergey Ioffe and Christian Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In Proceedings of the 32nd International Conference on Machine Learning - Volume 37, pp. 448-456, 2015.

Alex Kendall, Yarin Gal, and Roberto Cipolla. Multi-task learning using uncertainty to weigh losses for scene geometry and semantics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7482-7491, 2018.

Shikun Liu, Edward Johns, and Andrew J Davison. End-to-end multi-task learning with attention. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1871-1880, 2019.

Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Deep learning face attributes in the wild. In Proceedings of the IEEE International Conference on Computer Vision, pp. 3730-3738, 2015.

Jiasen Lu, Vedanuj Goswami, Marcus Rohrbach, Devi Parikh, and Stefan Lee. 12-in-1: Multi-task vision and language representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 10437-10446, 2020.

Arun Mallya, Dillon Davis, and Svetlana Lazebnik. Piggyback: Adapting a single network to multiple tasks by learning to mask weights. In Proceedings of the European Conference on Computer Vision (ECCV), pp. 67-82, 2018.

Kevis-Kokitsi Maninis, Ilija Radosavovic, and Iasonas Kokkinos. Attentive single-tasking of multiple tasks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1851-1860, 2019.

Ishan Misra, Abhinav Shrivastava, Abhinav Gupta, and Martial Hebert. Cross-stitch networks for multi-task learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3994-4003, 2016.

Chao Peng, Tete Xiao, Zeming Li, Yuning Jiang, Xiangyu Zhang, Kai Jia, Gang Yu, and Jian Sun. Megdet: A large mini-batch object detector. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6181-6189, 2018.

Sylvestre-Alvise Rebuffi, Hakan Bilen, and Andrea Vedaldi. Learning multiple visual domains with residual adapters. In Advances in Neural Information Processing Systems, pp. 506-516, 2017.

Sebastian Ruder. An overview of multi-task learning in deep neural networks. arXiv preprint arXiv:1706.05098, 2017.

Sebastian Ruder, Joachim Bingel, Isabelle Augenstein, and Anders Søgaard. Latent multi-task architecture learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pp. 4822-4829, 2019.

Ozan Sener and Vladlen Koltun. Multi-task learning as multi-objective optimization. In Advances in Neural Information Processing Systems, pp. 527-538, 2018.

Nathan Silberman, Derek Hoiem, Pushmeet Kohli, and Rob Fergus. Indoor segmentation and support inference from rgbd images. In European Conference on Computer Vision, pp. 746-760. Springer, 2012.

Trevor Standley, Amir R Zamir, Dawn Chen, Leonidas Guibas, Jitendra Malik, and Silvio Savarese. Which tasks should be learned together in multi-task learning? In International Conference on Machine Learning, 2020.

Gjorgji Strezoski, Nanne van Noord, and Marcel Worring. Many task learning with task routing. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1375-1384, 2019.

Tianhe Yu, Saurabh Kumar, Abhishek Gupta, Sergey Levine, Karol Hausman, and Chelsea Finn. Gradient surgery for multi-task learning. arXiv preprint arXiv:2001.06782, 2020.

Amir R Zamir, Alexander Sax, William Shen, Leonidas J Guibas, Jitendra Malik, and Silvio Savarese. Taskonomy: Disentangling task transfer learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 3712-3722, 2018.

Amir R Zamir, Alexander Sax, Nikhil Cheerla, Rohan Suri, Zhangjie Cao, Jitendra Malik, and Leonidas J Guibas. Robust learning through cross-task consistency. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 11197-11206, 2020.

Yu Zhang and Qiang Yang. A survey on multi-task learning. arXiv preprint arXiv:1707.08114, 2017.

Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia. Pyramid scene parsing network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 2881-2890, 2017.

Barret Zoph and Quoc V. Le. Neural architecture search with reinforcement learning. In Proceedings of the International Conference on Learning Representations, 2017.
# A RELATED WORK OF NETWORK STRUCTURE
Cross-stitch Networks (Misra et al., 2016) learn coefficients to linearly combine activations from multiple tasks to construct better task-specific representations. To break the limitation of channel-wise cross-task feature fusion only, NDDR-CNN (Gao et al., 2019) proposes layer-wise cross-channel feature aggregation as $1 \times 1$ convolutions on the concatenated feature maps from multiple tasks. More generally, MTL-NAS (Gao et al., 2020) introduces cross-layer connections among tasks to fully exploit feature sharing across both low and high layers, extending the idea of Sluice Networks (Ruder et al., 2019) by leveraging neural architecture search (Zoph & Le, 2017). The parameters of these methods increase linearly with the number of tasks. To improve model compactness, Residual Adapters (Rebuffi et al., 2017) introduce a small number of task-specific parameters for each layer and convolve them with the task-agnostic representations to form the task-related ones. MTAN (Liu et al., 2019) generates data-dependent attention tensors with task-specific parameters to attend to the task-shared features. Single-tasking (Maninis et al., 2019) instead applies the squeeze-and-excitation (Hu et al., 2018) module to generate attentive vectors for each task. In Task Routing (Strezoski et al., 2019), the attentive vectors are randomly sampled before training and are fixed for each image. Piggyback (Mallya et al., 2018) opts to mask parameter weights in place of activation maps, dealing with task sharing from another point of view. The above methods can share parameters among tasks to a large extent; however, they are not memory-efficient because each task still needs to compute all of its own intermediate feature maps, which also leads to inferior inference speed compared with loss weighting methods.
# B DETAILED DERIVATION
# B.1 GRADIENT BALANCE: IMTL-G
Here we give the detailed derivation of the closed-form solution of our IMTL-G. We also demonstrate the scale-invariance property of IMTL-G, i.e., it is invariant to scale changes of the losses.
Solution. As we want to achieve:

$$
\boldsymbol{g}\boldsymbol{u}_{1}^{\top} = \boldsymbol{g}\boldsymbol{u}_{t}^{\top} \Leftrightarrow \boldsymbol{g}\left(\boldsymbol{u}_{1} - \boldsymbol{u}_{t}\right)^{\top} = 0, \quad \forall 2 \leqslant t \leqslant T, \tag{9}
$$

where $\pmb{u}_t = \pmb{g}_t / \|\pmb{g}_t\|$. Recall that we have $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ and $\sum_{t}\alpha_{t} = 1$. If we set $\pmb{\alpha} = [\alpha_{2},\dots,\alpha_{T}]$ and $\pmb{G}^{\top} = [\pmb{g}_2^\top,\dots,\pmb{g}_T^\top]$, then $\alpha_{1} = 1 - \mathbf{1}\pmb{\alpha}^{\top}$ and Eq. (9) can be expanded as:

$$
\left(\sum_{t} \alpha_{t}\boldsymbol{g}_{t}\right)\left[\boldsymbol{u}_{1}^{\top} - \boldsymbol{u}_{2}^{\top},\dots,\boldsymbol{u}_{1}^{\top} - \boldsymbol{u}_{T}^{\top}\right] = \mathbf{0} \Leftrightarrow \left[1 - \mathbf{1}\boldsymbol{\alpha}^{\top},\ \boldsymbol{\alpha}\right]\left[\begin{array}{l}\boldsymbol{g}_{1} \\ \boldsymbol{G}\end{array}\right]\boldsymbol{U}^{\top} = \mathbf{0}, \tag{10}
$$
where $\pmb{U}^{\top} = \left[\pmb{u}_1^{\top} - \pmb{u}_2^{\top},\dots,\pmb{u}_1^{\top} - \pmb{u}_T^{\top}\right]$, and $\mathbf{1}$ and $\mathbf{0}$ indicate the all-one and all-zero row vectors, respectively. Eq. (10) can be solved by:

$$
\left[\left(1 - \mathbf{1}\boldsymbol{\alpha}^{\top}\right)\boldsymbol{g}_{1} + \boldsymbol{\alpha}\boldsymbol{G}\right]\boldsymbol{U}^{\top} = \mathbf{0} \Leftrightarrow \boldsymbol{\alpha}\left(\mathbf{1}^{\top}\boldsymbol{g}_{1} - \boldsymbol{G}\right)\boldsymbol{U}^{\top} = \boldsymbol{g}_{1}\boldsymbol{U}^{\top}. \tag{11}
$$

Assume $\pmb{D}^{\top} = \pmb{g}_1^{\top}\mathbf{1} - \pmb{G}^{\top} = \left[\pmb{g}_1^{\top} - \pmb{g}_2^{\top},\dots,\pmb{g}_1^{\top} - \pmb{g}_T^{\top}\right]$, then we reach:

$$
\boldsymbol{\alpha}\boldsymbol{D}\boldsymbol{U}^{\top} = \boldsymbol{g}_{1}\boldsymbol{U}^{\top} \Leftrightarrow \boldsymbol{\alpha} = \boldsymbol{g}_{1}\boldsymbol{U}^{\top}\left(\boldsymbol{D}\boldsymbol{U}^{\top}\right)^{-1}. \tag{12}
$$
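In practice Eq. (12) is easy to implement. Below is a minimal NumPy sketch of this closed form; the function name and the random test gradients are illustrative, and `grads` is assumed to stack the per-task gradients $\{\pmb{g}_t\}$ as rows.

```python
import numpy as np

def imtl_g_weights(grads):
    """Closed-form IMTL-G weights from Eq. (12).

    grads: (T, d) array stacking the task gradients g_t as rows.
    Returns alpha of shape (T,) summing to 1, so that the aggregated
    gradient g = alpha @ grads has equal projections onto all u_t.
    """
    g1, G = grads[0], grads[1:]
    U = grads / np.linalg.norm(grads, axis=1, keepdims=True)  # unit gradients u_t
    Ut = (U[0] - U[1:]).T                                     # U^T: d x (T-1)
    D = g1[None, :] - G                                       # D: (T-1) x d
    alpha_rest = g1 @ Ut @ np.linalg.inv(D @ Ut)              # [alpha_2, ..., alpha_T]
    return np.concatenate(([1.0 - alpha_rest.sum()], alpha_rest))

# sanity check: the projections g u_t^T should all be equal
rng = np.random.default_rng(0)
grads = rng.normal(size=(3, 8))
g = imtl_g_weights(grads) @ grads
U = grads / np.linalg.norm(grads, axis=1, keepdims=True)
print(g @ U.T)  # three (numerically) identical values
```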
Property. We can also prove that the aggregated gradient $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ with $\{\alpha_t\}$ given in Eq. (12) is invariant to the scale changes of the losses $\{L_t\}$ (or, equivalently, of the gradients $\{\pmb{g}_t = \nabla_\theta L_t\}$), as stated in the following theorem.
Theorem 2. Given $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ with $\sum_{t}\alpha_{t} = 1$ satisfying $\pmb{g}\pmb{u}_{t}^{\top} = C$, when $\{L_t\}$ are scaled by $\{k_t > 0\}$ (equivalently, $\{\pmb{g}_t\}$ are scaled by $\{k_t\}$), if $\pmb{g}' = \sum_{t}\alpha_{t}'(k_{t}\pmb{g}_{t})$ with $\sum_{t}\alpha_{t}' = 1$ satisfies $\pmb{g}'\pmb{u}_{t}^{\top} = C'$, then $\pmb{g}' = \lambda \pmb{g}$. Here $\pmb{u}_{t} = \frac{\pmb{g}_{t}}{\|\pmb{g}_{t}\|} = \frac{k_{t}\pmb{g}_{t}}{\|k_{t}\pmb{g}_{t}\|}$, and $\lambda$, $C$ and $C'$ are constants.
Figure 4: Loss scales of IMTL-G for different tasks when training on the Cityscapes dataset.
Proof. As we have:
$$
\boldsymbol{g} = \sum_{t} \alpha_{t}\boldsymbol{g}_{t} = \sum_{t} \frac{\alpha_{t}}{k_{t}} k_{t}\boldsymbol{g}_{t} \quad \text{and} \quad \boldsymbol{g}\boldsymbol{u}_{t}^{\top} = C, \tag{13}
$$
by constructing:
|
| 297 |
+
|
| 298 |
+
$$
\alpha_{t}' = \frac{\alpha_{t}}{k_{t}} \Big/ \sum_{\tau} \frac{\alpha_{\tau}}{k_{\tau}} \quad \text{and} \quad \boldsymbol{g}' = \sum_{t} \alpha_{t}'\left(k_{t}\boldsymbol{g}_{t}\right) = \boldsymbol{g} \Big/ \sum_{\tau} \frac{\alpha_{\tau}}{k_{\tau}} = \lambda\boldsymbol{g}, \tag{14}
$$
we have:
$$
\sum_{t} \alpha_{t}' = 1 \quad \text{and} \quad \boldsymbol{g}'\boldsymbol{u}_{t}^{\top} = C \Big/ \sum_{\tau} \frac{\alpha_{\tau}}{k_{\tau}} = C'. \tag{15}
$$
From Eq. (12) we know that $\{\alpha_t\}$ has a unique solution, so the $\pmb{g}'$ satisfying IMTL-G is unique and must be the one given by Eq. (14). Hence $\pmb{g}'$ and $\pmb{g}$ are linearly correlated, which proves the theorem.
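The theorem can also be illustrated numerically, reusing the hypothetical `imtl_g_weights` from the sketch above: rescaling the task gradients by arbitrary positive factors changes the aggregated gradient only by the scalar $\lambda$.

```python
import numpy as np

rng = np.random.default_rng(1)
grads = rng.normal(size=(3, 8))
k = np.array([10.0, 0.5, 3.0])[:, None]   # arbitrary positive loss scales k_t

g_orig = imtl_g_weights(grads) @ grads
g_scaled = imtl_g_weights(k * grads) @ (k * grads)
print(g_scaled / g_orig)                  # element-wise constant ratio lambda
```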
# B.2 LOSS BALANCE: IMTL-L
Using an ordinary differential equation, we can derive that the scale function $\int h(s)\mathrm{d}s$ in our IMTL-L must be an exponential function. As we require:
$$
\int h(s)\,\mathrm{d}s = C\,h(s), \quad C > 0. \tag{16}
$$
If we set $y = \int h(s)\mathrm{d}s$ , then:
$$
y = C\frac{\mathrm{d}y}{\mathrm{d}s} \Rightarrow \frac{\mathrm{d}y}{y} = \frac{1}{C}\,\mathrm{d}s. \tag{17}
$$
By taking the antiderivative:
$$
\int \frac{\mathrm{d}y}{y} = \frac{1}{C}\int \mathrm{d}s \Rightarrow \ln y = \frac{1}{C}s + C'. \tag{18}
$$
Then we have:
$$
\int h(s)\,\mathrm{d}s = y = e^{C'}\left(e^{\frac{1}{C}}\right)^{s} = b\,a^{s}, \quad a > 1, \; b > 0. \tag{19}
$$
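For concreteness, a minimal PyTorch sketch of a loss with such a learnable exponential scale is shown below; it follows the $e^{s}L - s$ form that also appears in Appendix C.4, and the module name and the example losses are illustrative.

```python
import torch
import torch.nn as nn

class LearnableScaledLoss(nn.Module):
    """Loss balance with learnable scales: sum_t (e^{s_t} L_t - s_t).

    The exponential scale e^{s_t} matches the form derived in Eq. (19);
    subtracting s_t prevents the trivial solution s_t -> -inf.
    """
    def __init__(self, num_tasks):
        super().__init__()
        self.s = nn.Parameter(torch.zeros(num_tasks))  # one s_t per task

    def forward(self, task_losses):
        losses = torch.stack(task_losses)
        return (torch.exp(self.s) * losses - self.s).sum()

# usage sketch with two hypothetical task losses
criterion = LearnableScaledLoss(num_tasks=2)
total = criterion([torch.tensor(2.3, requires_grad=True),
                   torch.tensor(0.7, requires_grad=True)])
total.backward()
```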
# C DETAILED DISCUSSION
# C.1 CONDITIONAL EQUIVALENCE OF IMTL-G AND GRADNORM
First we introduce the following lemma.
Lemma 3. If $\pmb{u}_t\pmb{u}_{\tau}^\top = C_1$ , $\forall t \neq \tau$ , then the solution $\{\alpha_t\}$ of IMTL-G satisfies $\{\alpha_t > 0\}$ .
Proof. As $\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|$ , by constructing $\pmb{g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ where:
$$
\alpha_{t} = \left\|\boldsymbol{g}_{t}\right\|^{-1} / \sum_{\tau}\left\|\boldsymbol{g}_{\tau}\right\|^{-1}, \tag{20}
$$
then we have $\sum_{t}\alpha_{t} = 1$ and:
$$
\boldsymbol{g}\boldsymbol{u}_{t}^{\top} = \left(\sum_{\tau}\boldsymbol{u}_{\tau}\boldsymbol{u}_{t}^{\top}\right) / \sum_{\tau}\|\boldsymbol{g}_{\tau}\|^{-1} = \left[(T-1)C_{1} + 1\right] / \sum_{\tau}\|\boldsymbol{g}_{\tau}\|^{-1} = C_{2}. \tag{21}
$$
From Eq. (12) we know the solution $\{\alpha_t\}$ of IMTL-G is unique, so it must be the one given by Eq. (20), where $\{\alpha_t > 0\}$. This proves the lemma.
Then we prove Theorem 1 which states that IMTL-G leads to the same solution as GradNorm when the angle between any pair of gradients $\{\pmb{g}_t\}$ is identical: $\pmb{u}_t\pmb{u}_{\tau}^{\top} = C_1$ , $\forall t \neq \tau$ .
Proof. $(\Rightarrow$ Necessity) Given constant projections in IMTL-G, we have:
$$
\boldsymbol{g}\boldsymbol{u}_{t}^{\top} = \left(\sum_{\tau}\alpha_{\tau}\boldsymbol{g}_{\tau}\right)\boldsymbol{u}_{t}^{\top} = C_{2}. \tag{22}
$$
Recall that $\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|$ and $\pmb{u}_t\pmb{u}_{\tau}^{\top} = C_1$ , $\forall t \neq \tau$ . From Lemma 3 we know that $\{\alpha_t\}$ given by IMTL-G must satisfy $\{\alpha_t > 0\}$ . If we assume $n_t = \| \alpha_t\pmb{g}_t\|$ , then we know $\alpha_{t}\pmb{g}_{t} = n_{t}\pmb{u}_{t}$ and:
$$
\sum_{\tau} n_{\tau}\boldsymbol{u}_{\tau}\boldsymbol{u}_{t}^{\top} = \sum_{\tau \neq t} n_{\tau}C_{1} + n_{t} = C_{2}. \tag{23}
$$
Now we obtain:
$$
\sum_{\tau \neq t} n_{\tau}C_{1} + n_{t} = \sum_{\tau} n_{\tau}C_{1} + (1 - C_{1})n_{t} = C_{2}. \tag{24}
$$
As $C_1 < 1$, we can then prove $n_t = C_3$, $\forall t$. This implies that the norm of each scaled gradient is constant, which is exactly what GradNorm (Chen et al., 2018) requires. Moreover, we can obtain the relationship among the constants from Eq. (24):
$$
C_{1}TC_{3} + (1 - C_{1})C_{3} = C_{2} \Rightarrow C_{3} = \frac{C_{2}}{(T-1)C_{1} + 1}. \tag{25}
$$
$(\Leftarrow \text{Sufficiency})$ In GradNorm, $\{\alpha_t\}$ are always chosen to satisfy $\{\alpha_t > 0\}$ , so if we assume $n_t = \|\alpha_t g_t\|$ , then given the constant norm of the scaled gradient in GradNorm, we have:
$$
\alpha_{t}\boldsymbol{g}_{t} = n_{t}\boldsymbol{u}_{t} = C_{3}\boldsymbol{u}_{t}, \tag{26}
$$
where $\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|$ . As we have $\pmb {g} = \sum_{t}\alpha_{t}\pmb{g}_{t}$ and $\pmb {u}_t\pmb{u}_\tau^\top = C_1$ $\forall t\neq \tau$ , then we obtain:
$$
\boldsymbol{g}\boldsymbol{u}_{t}^{\top} = \left(\sum_{\tau}\alpha_{\tau}\boldsymbol{g}_{\tau}\right)\boldsymbol{u}_{t}^{\top} = \left(\sum_{\tau} C_{3}\boldsymbol{u}_{\tau}\right)\boldsymbol{u}_{t}^{\top} = C_{3}\left[(T-1)C_{1} + 1\right] = C_{2}. \tag{27}
$$
This means the projections of $\pmb{g}$ onto $\{\pmb{g}_t\}$ are constant, which is exactly what our IMTL-G requires.
Corollary 4. In GradNorm, if the solution $\{\alpha_t\}$ satisfies $\sum_{t} \alpha_{t} = 1$ , then its constants are given by $C_3 = 1 / \sum_t\| g_t\|^{-1}$ and $C_2 = [(T - 1)C_1 + 1] / \sum_t\| g_t\|^{-1}$ , and its scaling factors are given by $\left\{\alpha_{t} = \| g_{t}\|^{-1} / \sum_{\tau}\| g_{\tau}\|^{-1}\right\}$ .
Proof. By using $\alpha_{t} = C_{3} / \|\pmb{g}_{t}\|$ from Eq. (26), we have $\sum_{t}C_{3} / \|\pmb{g}_{t}\| = 1$, so $C_3 = 1 / \sum_t\|\pmb{g}_t\|^{-1}$ and $\alpha_{t} = \|\pmb{g}_{t}\|^{-1} / \sum_{\tau}\|\pmb{g}_{\tau}\|^{-1}$. From Eq. (27), the relationship between $C_2$ and $C_3$ is $C_3[(T - 1)C_1 + 1] = C_2$, so $C_2 = [(T - 1)C_1 + 1] / \sum_t\|\pmb{g}_t\|^{-1}$.
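As a numeric sanity check of Lemma 3 and Corollary 4 (reusing the hypothetical `imtl_g_weights` from Appendix B.1), mutually orthogonal gradients give $C_1 = 0$, so the IMTL-G weights from Eq. (12) should coincide with the inverse-norm weights of Eq. (20):

```python
import numpy as np

grads = np.diag([1.0, 4.0, 0.25])           # orthogonal rows, unequal norms
inv_norm = 1.0 / np.linalg.norm(grads, axis=1)
alpha_gradnorm = inv_norm / inv_norm.sum()  # Eq. (20)

print(imtl_g_weights(grads))                # IMTL-G weights from Eq. (12)
print(alpha_gradnorm)                       # should match the line above
```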
# C.2 CLOSED-FORM SOLUTION OF MGDA
In our relaxed MGDA (Sener & Koltun, 2018) without $\{\alpha_{t} \geqslant 0\}$, finding $\pmb{g} = \sum_{t} \alpha_{t} \pmb{g}_{t}$ with $\sum_{t} \alpha_{t} = 1$ such that $\pmb{g}$ has minimum norm is equivalent to finding the normal vector of the hyper-plane formed by $\{\pmb{g}_{t}\}$. So we require $\pmb{g}$ to be perpendicular to all of the $\{\pmb{g}_{1} - \pmb{g}_{t}\}$ on the hyper-plane:
$$
\boldsymbol{g} \perp (\boldsymbol{g}_{1} - \boldsymbol{g}_{t}) \Leftrightarrow \boldsymbol{g}(\boldsymbol{g}_{1} - \boldsymbol{g}_{t})^{\top} = 0, \quad \forall 2 \leqslant t \leqslant T. \tag{28}
$$
If we set $\pmb{\alpha} = [\alpha_{2},\dots,\alpha_{T}]$ and $\pmb{G}^{\top} = \left[\pmb{g}_{2}^{\top},\dots,\pmb{g}_{T}^{\top}\right]$, then we have $\alpha_{1} = 1 - \mathbf{1}\pmb{\alpha}^{\top}$, and Eq. (28) can be expanded as:
$$
\left(\sum_{t}\alpha_{t}\boldsymbol{g}_{t}\right)\left[\boldsymbol{g}_{1}^{\top} - \boldsymbol{g}_{2}^{\top},\dots,\boldsymbol{g}_{1}^{\top} - \boldsymbol{g}_{T}^{\top}\right] = \mathbf{0} \Leftrightarrow \left[1 - \mathbf{1}\boldsymbol{\alpha}^{\top},\ \boldsymbol{\alpha}\right]\left[\begin{array}{l}\boldsymbol{g}_{1} \\ \boldsymbol{G}\end{array}\right]\boldsymbol{D}^{\top} = \mathbf{0}, \tag{29}
$$
where $\pmb{D}^{\top} = \left[\pmb{g}_{1}^{\top} - \pmb{g}_{2}^{\top},\dots,\pmb{g}_{1}^{\top} - \pmb{g}_{T}^{\top}\right]$, and $\mathbf{1}$ and $\mathbf{0}$ indicate the all-one and all-zero row vectors, respectively. Eq. (29) can be represented as:
$$
\left[\left(1 - \mathbf{1}\boldsymbol{\alpha}^{\top}\right)\boldsymbol{g}_{1} + \boldsymbol{\alpha}\boldsymbol{G}\right]\boldsymbol{D}^{\top} = \mathbf{0} \Leftrightarrow \boldsymbol{\alpha}\left(\mathbf{1}^{\top}\boldsymbol{g}_{1} - \boldsymbol{G}\right)\boldsymbol{D}^{\top} = \boldsymbol{g}_{1}\boldsymbol{D}^{\top}.
$$
Since we also have $\pmb{D} = \mathbf{1}^{\top}\pmb{g}_{1} - \pmb{G}$, the closed-form solution of $\pmb{\alpha}$ is given by:
$$
\boldsymbol{\alpha}\boldsymbol{D}\boldsymbol{D}^{\top} = \boldsymbol{g}_{1}\boldsymbol{D}^{\top} \Leftrightarrow \boldsymbol{\alpha} = \boldsymbol{g}_{1}\boldsymbol{D}^{\top}\left(\boldsymbol{D}\boldsymbol{D}^{\top}\right)^{-1}. \tag{30}
$$
Bias of MGDA. In the main text we state that MGDA focuses on tasks with small gradient magnitudes, where we relaxed MGDA by dropping the constraints $\{\alpha_t \geqslant 0\}$. However, even with these constraints, the problem persists. For example, with two tasks, assume $\|\pmb{g}_1\| < \|\pmb{g}_2\|$; if the minimum-norm point $\pmb{g}$ satisfying $\pmb{g} = \alpha \pmb{g}_1 + (1 - \alpha)\pmb{g}_2$ lies outside the convex hull of $\{\pmb{g}_1, \pmb{g}_2\}$, or equivalently $\alpha > 1$, MGDA clamps $\alpha$ to $\alpha = 1$ and the optimum is $\pmb{g}^{\star} = \pmb{g}_{1}$. Then the projections of $\pmb{g}^{\star}$ onto $\pmb{g}_1$ and $\pmb{g}_2$ are $\|\pmb{g}_1\|$ and $\pmb{g}_1\pmb{u}_2^\top$ ($\pmb{u}_2 = \pmb{g}_2 / \|\pmb{g}_2\|$), respectively. As $\|\pmb{g}_1\| > \left|\pmb{g}_1\pmb{u}_2^\top\right|$, MGDA still focuses on the task with the smaller gradient magnitude.
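A minimal NumPy sketch of the relaxed closed form in Eq. (30) is given below; the function name and the random test gradients are illustrative.

```python
import numpy as np

def mgda_relaxed_weights(grads):
    """Relaxed MGDA weights from Eq. (30), without {alpha_t >= 0}."""
    g1, G = grads[0], grads[1:]
    D = g1[None, :] - G                              # (T-1) x d
    alpha_rest = g1 @ D.T @ np.linalg.inv(D @ D.T)   # [alpha_2, ..., alpha_T]
    return np.concatenate(([1.0 - alpha_rest.sum()], alpha_rest))

rng = np.random.default_rng(2)
grads = rng.normal(size=(3, 8))
g = mgda_relaxed_weights(grads) @ grads
print(g @ (grads[0] - grads[1:]).T)   # ~0: g is normal to the hyper-plane
```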
# C.3 ANALYSIS OF PCGRAD
PCGrad (Yu et al., 2020) mitigates gradient conflicts by projecting the gradient of one task onto the orthogonal direction of the others', and the aggregated gradient can be written as:
$$
\boldsymbol{g} = \sum_{t}\left(\boldsymbol{g}_{t} + \sum_{\tau} C_{t\tau}\boldsymbol{u}_{\tau}\right), \tag{31}
$$
with $\pmb{u}_t = \pmb{g}_t / \| \pmb{g}_t\|$ and the coefficients:
$$
C_{tt} = 0, \quad C_{t\tau} = \left[-\left(\boldsymbol{g}_{t} + \sum_{t' < \tau} C_{tt'}\boldsymbol{u}_{t'}\right)\boldsymbol{u}_{\tau}^{\top}\right]_{+}, \quad \forall t, \tau, \tag{32}
$$
where $\left[\cdot\right]_{+}$ denotes the ReLU operator. Note that the tasks are shuffled before calculating the aggregated gradient $\pmb{g}$ to achieve symmetry with respect to the task order in expectation. Eq. (31) can be represented more compactly in matrix form:
$$
\boldsymbol{g} = \mathbf{1}\left(\boldsymbol{I}_{T} + \boldsymbol{C}\boldsymbol{N}\right)\boldsymbol{G} \equiv \boldsymbol{\alpha}\boldsymbol{G}, \tag{33}
$$
where $\mathbf{I}_T$ is the identity matrix, $\mathbf{C} = \{C_{t\tau}\}$ is the coefficient matrix whose entries are given in Eq. (32), and $\mathbf{N} = \mathrm{diag}\left(1/\|\pmb{g}_1\|,\dots,1/\|\pmb{g}_T\|\right)$ is the diagonal normalization matrix. In Eq. (33) we use $\pmb{G}$ and $\pmb{\alpha}$ to denote the raw gradients and scaling factors of all tasks. We find that PCGrad can also be regarded as loss weighting, with the loss weights given by $\pmb{\alpha} = \mathbf{1}\left(\mathbf{I}_T + \mathbf{CN}\right)$. However, it may still break the balance among tasks. For example, with two tasks, assume the angle between
the gradients is $\phi$: 1) when $\pi/2 \leqslant \phi < \pi$, then $C = \left[ \begin{array}{cc}0 & -\pmb{g}_1\pmb{g}_2^\top / \|\pmb{g}_2\| \\ -\pmb{g}_1\pmb{g}_2^\top / \|\pmb{g}_1\| & 0 \end{array} \right]$ and the projections onto the two raw gradients are $\|\pmb{g}_1\|\sin^2\phi$ and $\|\pmb{g}_2\|\sin^2\phi$; 2) when $0 < \phi < \pi/2$, then $C = \mathbf{0}$ and the projections are $\|\pmb{g}_1\| + \|\pmb{g}_2\|\cos\phi$ and $\|\pmb{g}_2\| + \|\pmb{g}_1\|\cos\phi$. In both cases, the projections are equal if and only if $\|\pmb{g}_1\| = \|\pmb{g}_2\|$. Otherwise, the task with the larger gradient magnitude will be trained more sufficiently, which suffers from the same problem as uniform scaling, namely naively adding all the losses even when the loss scales are highly different.
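For reference, a simplified NumPy sketch of the projection in Eqs. (31)-(32) is given below; the random task order mirrors the shuffling mentioned above, and the function name and the test gradients are illustrative.

```python
import numpy as np

def pcgrad(grads, rng):
    """Sum of task gradients, each projected away from conflicting ones."""
    T = grads.shape[0]
    total = np.zeros(grads.shape[1])
    for t in range(T):
        g = grads[t].copy()
        for tau in rng.permutation(T):      # shuffled task order
            if tau == t:
                continue
            dot = g @ grads[tau]
            if dot < 0:                     # conflict: remove component along g_tau
                g -= dot / (grads[tau] @ grads[tau]) * grads[tau]
        total += g
    return total

rng = np.random.default_rng(3)
print(pcgrad(rng.normal(size=(2, 5)), rng))
```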
# C.4 $L_{2}$ LOSS IN UNCERTAINTY WEIGHTING
For regression, uncertainty weighting (Kendall et al., 2018) treats the $L_{2}$ loss as the negative log-likelihood of a sample target that follows a Gaussian distribution:
$$
-\log p(y \mid f(\boldsymbol{x})) = \frac{1}{2}\left(\frac{1}{\sigma^{2}}\|y - f(\boldsymbol{x})\|_{2}^{2} + \log \sigma^{2}\right), \tag{34}
$$
where $\pmb{x}$ is the data sample, $y$ is the ground-truth label, $f$ denotes the prediction model and $\sigma$ is the standard deviation of the Gaussian distribution. By setting $s = -\log \sigma^2$, the scaled $L_{2}$ loss becomes $L = \frac{1}{2}\left(e^{s}L_{\mathrm{reg}} - s\right)$, which has a similar form to the scaled $L_{1}$ loss except for the front factor $1/2$. So uncertainty weighting has difficulty reaching a unified form for all kinds of losses, which makes it less general than our IMTL-L.
# C.5 GRADIENT OF GEOMETRIC MEAN
GLS (Chennupati et al., 2019) computes the loss as the geometric mean of the task losses; its gradient with respect to the model parameters is:
$$
\nabla_{\boldsymbol{\theta}} L = \frac{1}{T}\left(\prod_{t} L_{t}\right)^{\frac{1}{T}-1} \sum_{t}\left[\left(\prod_{\tau \neq t} L_{\tau}\right)\nabla_{\boldsymbol{\theta}} L_{t}\right] \tag{35}
$$

$$
= \frac{1}{T}\left(\prod_{t} L_{t}\right)^{\frac{1}{T}} \sum_{t} \frac{\nabla_{\boldsymbol{\theta}} L_{t}}{L_{t}} = \frac{L}{T}\sum_{t} \frac{1}{L_{t}}\nabla_{\boldsymbol{\theta}} L_{t}, \tag{36}
$$
where $L$ is the geometric mean loss and $T$ is the task number. This is equivalent to weighting each task-specific loss by its reciprocal, up to the extra front factor $L/T$ with $L = \left(\prod_{t} L_{t}\right)^{\frac{1}{T}}$, so GLS is sensitive to the loss scale changes of $\{L_{t}\}$ and is not scale-invariant.
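As a small illustration, the geometric-mean loss of GLS can be computed in log-space for numerical stability; the following PyTorch sketch uses illustrative task losses.

```python
import torch

def gls_loss(task_losses):
    """Geometric mean (prod_t L_t)^(1/T), computed in log-space."""
    losses = torch.stack(task_losses)
    return torch.exp(torch.log(losses).mean())

print(gls_loss([torch.tensor(2.0), torch.tensor(8.0)]))  # sqrt(2 * 8) = 4
```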
# D IMPLEMENTATION DETAILS
To solely compare the loss weighting methods, we fix the network structure and choose ResNet-50 (He et al., 2016) with dilation (Chen et al., 2017) and synchronized (Peng et al., 2018) batch normalization (Ioffe & Szegedy, 2015) as the shared backbone and PSPNet (Zhao et al., 2017) as the task-specific head; the backbone weights are pretrained on ImageNet (Deng et al., 2009). Following the common practice of semantic segmentation, in training we adopt augmentations such as random resize (scale between 0.5 and 2), random rotation (between -10 and 10 degrees), Gaussian blur (with a radius of 5) and random horizontal flip. Besides, we apply strided cropping and horizontal flipping as testing augmentations. The predicted results in the overlapped regions of different crops are averaged to obtain the aggregated prediction of the whole image. Only pixels with ground truth labels are included in loss and metric computation, while others are ignored. Semantic segmentation, instance segmentation, surface normal estimation and disparity/depth estimation are considered. As for the losses/metrics, semantic segmentation uses cross-entropy/mIoU, surface normal estimation adopts $(1 - \cos)$/cosine similarity, and both instance segmentation and disparity/depth estimation use the $L_{1}$ loss. We use a polynomial learning rate schedule with a power of 0.9 and SGD with a momentum of 0.9 and weight decay of $10^{-4}$ as the optimizer, with the model trained for 200 epochs. After passing through the shared backbone where strided convolutions exist, the feature maps have 1/8 the size of the
Figure 5: Pipeline used in the Cityscapes visual understanding experiment. The centroids are computed from the offset regression results. Each pixel is assigned to its nearest candidate centroid.
input image. Then the results predicted by PSPNet (Zhao et al., 2017) heads are up-sampled to the original image size for loss and metric computation.
For the Cityscapes dataset, the batch size is 32 ($2 \times 16$ GPUs) with an initial learning rate of 0.02. We train on the 2975 training images and validate on the 500 validation images ($1024 \times 2048$ full resolution) where ground truth labels are provided. Three tasks are considered, namely semantic segmentation, instance segmentation and disparity/depth estimation. Training and testing are done on $713 \times 713$ crops. Semantic segmentation differentiates among the commonly used 19 classes. Instance segmentation is cast as offset regression, where each pixel $\pmb{p}_i = (x_i, y_i)$ approximates the relative offset $\pmb{o}_i = (\mathrm{d}x_i, \mathrm{d}y_i)$ with respect to the centroid $\pmb{c}_{\mathrm{id}(\pmb{p}_i)}$ of its belonging instance $\mathrm{id}(\pmb{p}_i)$. At inference, we abandon the time-consuming and complicated clustering methods adopted by the previous method (Kendall et al., 2018). Instead, we directly use the offset vectors $\{\pmb{o}_i\}$ predicted by the model to find the centroids of instances. By definition, the norm of a centroid's offset vector should be 0, so we can transform the offset vector norm $\|\pmb{o}_i\|$ into the probability $q_i$ of being a centroid with the exponential function $q_i = e^{-\|\pmb{o}_i\|}$. Next, a $7 \times 7$ edge filter is applied on the centroid probability map to filter out the spurious centroids on object edges resulting from the regression target ambiguity. The locations with centroid probability $q_i < 0.1$ are also manually suppressed. Then $7 \times 7$ max-pooling on the filtered probability map is used to produce candidate centroids and filter out duplicates. With the predicted centroids $\{\pmb{c}_j\}$, we can then assign each pixel $\pmb{p}_i$ to its belonging instance $\mathrm{id}(\pmb{p}_i)$ by the distance between its approximated centroid $\pmb{p}_i + \pmb{o}_i$ and the candidate centroids: $\operatorname{id}(\pmb{p}_i) = \arg\min_j \|\pmb{p}_i + \pmb{o}_i - \pmb{c}_j\|$. Depth is measured in pixels by the disparity between the left and right images. Fig. 5 shows the whole process, and a sketch is given below. Note that we need to carefully deal with label transformation during data augmentation. For example, disparity ground truth needs to be up-scaled by $s$ times if the image is up-sampled by $s$ times. Also, the predicted offset vectors of a flipped input should be mirrored to comply with the unflipped one.
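Below is a hedged PyTorch sketch of this clustering-free inference; it omits the $7 \times 7$ edge filter for brevity, the function name and tensor shapes are assumptions, and the thresholds follow the text.

```python
import torch
import torch.nn.functional as F

def assign_instances(offsets, prob_thresh=0.1):
    """offsets: (2, H, W) predicted (dx, dy) from each pixel to its centroid."""
    H, W = offsets.shape[1:]
    q = torch.exp(-offsets.norm(dim=0))              # centroid probability q_i
    # 7x7 max-pool NMS: keep local maxima above the threshold as candidates
    pooled = F.max_pool2d(q[None, None], 7, stride=1, padding=3)[0, 0]
    keep = (q == pooled) & (q >= prob_thresh)
    cys, cxs = keep.nonzero(as_tuple=True)
    centroids = torch.stack([cxs, cys], dim=1).float()               # (K, 2)
    # each pixel votes p_i + o_i and is assigned to the nearest candidate
    ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
    votes = torch.stack([xs + offsets[0], ys + offsets[1]], dim=-1)  # (H, W, 2)
    dists = (votes[:, :, None, :] - centroids[None, None]).norm(dim=-1)
    return dists.argmin(dim=-1)                      # (H, W) instance ids
```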
On the NYUv2 dataset, the batch size is 48 ($6 \times 8$ GPUs) with an initial learning rate of 0.03. We use the 795 training images for training and the 654 validation images for testing at $480 \times 640$ full resolution. $401 \times 401$ crops are used for training and testing. 13 coarse-grained classes are considered in semantic segmentation. The surface normal is represented by the unit normal vector of the corresponding surface. When doing data augmentation, the surface normal ground truth $\pmb{n} = (x,y,z)$ should be processed accordingly. If we resize the image by $s$ times, the $z$ coordinate of the normal vector should be scaled by $s$ and the vector renormalized: $\pmb{n}' = (x,y,sz) / \|(x,y,sz)\|$. If the image is rotated by the rotation matrix $\pmb{R}$, the normal vector should also be in-plane rotated, $(x',y') = (x,y)\pmb{R}^\top$, with $z$ unchanged. Moreover, a left-right flip should be applied to the normal vector, $\pmb{n}' = (-x,y,z)$, when mirroring the image horizontally; a sketch of these transforms follows. During testing, the normal vectors in the overlapped regions of crops are averaged and renormalized to produce the aggregated results. Depth is the absolute distance to the camera measured in meters, which is inversely proportional to the disparity measurement adopted by Cityscapes. So the depth in meters needs to be scaled by $1/s$ when the image is scaled by $s$ times, which is the reciprocal of the disparity transformation.
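A minimal NumPy sketch of these label transforms is given below; the function names are illustrative.

```python
import numpy as np

def resize_normal(n, s):
    """n = (x, y, z) unit normal; image resized by a factor of s."""
    x, y, z = n
    v = np.array([x, y, s * z])
    return v / np.linalg.norm(v)

def rotate_normal(n, R):
    """R: 2x2 in-plane rotation matrix; the z component is unchanged."""
    x, y = np.array(n[:2]) @ R.T
    return np.array([x, y, n[2]])

def hflip_normal(n):
    """Horizontal mirroring flips the x component."""
    return np.array([-n[0], n[1], n[2]])
```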
CelebA contains 202,599 face images from 10,177 identities, where each image has 40 binary attribute annotations. We train on the 162,770 training images and test on the 19,867 validation
images. Most of the implementation details are the same as those on the Cityscapes dataset, except that: 1) we employ ResNet-18 as the backbone and linear classifiers as the task-specific heads, so 40 heads in total are attached to the backbone; 2) binary cross-entropy is used as the classification loss for each attribute; 3) the batch size is 256 ($32 \times 8$ GPUs) and the model is trained from scratch for 100 epochs; 4) the input images have been aligned with the 5 annotated landmarks and cropped to $218 \times 178$.
# E QUALITATIVE RESULTS
Figure 6: Qualitative comparisons between our IMTL and previous methods on Cityscapes.
Figure 7: Qualitative results of our IMTL on Cityscapes. Semantic segmentation, instance segmentation and disparity estimation predictions are produced by a single network. The task-shared backbone is ResNet-50 and the task-specific heads are PSPNet. The image resolution is $1024 \times 2048$ .
Figure 8: Qualitative results of our IMTL on NYUv2. Semantic segmentation, surface normal estimation and depth estimation predictions are produced by a single network. The task-shared backbone is ResNet-50 and the task-specific heads are PSPNet. The image resolution is $480 \times 640$ .
towardsimpartialmultitasklearning/images.zip
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:793eb624eeca7c81f904d2775e0ea8c55c166bc8cc3320410ea76a1dc478f0ef
+ size 1130195

towardsimpartialmultitasklearning/layout.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:82810b8723737f4331c075518d0f18439dfcf466fcf4a131701a55fd3d9bb0ba
+ size 844268

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_content_list.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3b0c67ba675e11d90177acb81f768029d833cec734be8587840a462b81e701be
+ size 428600

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_model.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:947b3a5cb409b70c562c4fedfbaff24fdce2998c39f4dda6697b7648dd26082a
+ size 482414

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/3dd8ea00-b141-49dd-8ae7-d66c196095f1_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b177c7d16bd4844b8310bac7637834a250e2fc70aeb56284eed4cbcf7c90717
+ size 2118123

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/full.md
ADDED
The diff for this file is too large to render. See raw diff

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/images.zip
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebb60b0c97e4842246c7b27f0380e4b4fb6fc54cba52a440108dde024df63109
+ size 2420261

towardsresolvingtheimplicitbiasofgradientdescentformatrixfactorizationgreedylowranklearning/layout.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a2eba37f30a833d8d791af30816c3cd96616c5bcbadbb1e1f51f30223376e28
+ size 3215998

towardsrobustneuralnetworksviacloseloopcontrol/2ba8ed7d-798d-43ad-92c9-d621e78af611_content_list.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af6f08f940c637a408487b18485c7761b607866f8c8a23c4908dd3ba9c7a3932
+ size 159798

towardsrobustneuralnetworksviacloseloopcontrol/2ba8ed7d-798d-43ad-92c9-d621e78af611_model.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b8019450a7e8e4b9d95079eb7e5c3d3144ae47b431164259c7ef6d16980fae0
+ size 186812